/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "ac_shader_util.h"
#include "ac_gpu_info.h"

#include "sid.h"
#include "u_math.h"

#include <assert.h>
#include <stdlib.h>
#include <string.h>

unsigned ac_get_spi_shader_z_format(bool writes_z, bool writes_stencil, bool writes_samplemask,
                                    bool writes_mrt0_alpha)
{
   /* If writes_mrt0_alpha is true, one other flag must be true too. */
   assert(!writes_mrt0_alpha || writes_z || writes_stencil || writes_samplemask);

   if (writes_z || writes_mrt0_alpha) {
      /* Z needs 32 bits. */
      if (writes_samplemask || writes_mrt0_alpha)
         return V_028710_SPI_SHADER_32_ABGR;
      else if (writes_stencil)
         return V_028710_SPI_SHADER_32_GR;
      else
         return V_028710_SPI_SHADER_32_R;
   } else if (writes_stencil || writes_samplemask) {
      /* Both stencil and sample mask need only 16 bits. */
      return V_028710_SPI_SHADER_UINT16_ABGR;
   } else {
      return V_028710_SPI_SHADER_ZERO;
   }
}
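
/* Illustrative summary of the mapping above (for reference only, derived from the
 * conditions in the function):
 *   writes_z only                    -> SPI_SHADER_32_R       (Z in R)
 *   writes_z + writes_stencil        -> SPI_SHADER_32_GR      (Z in R, stencil in G)
 *   writes_z + writes_samplemask     -> SPI_SHADER_32_ABGR
 *   stencil and/or samplemask only   -> SPI_SHADER_UINT16_ABGR
 *   anything + writes_mrt0_alpha     -> SPI_SHADER_32_ABGR    (alpha takes the 4th channel)
 */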

unsigned ac_get_cb_shader_mask(unsigned spi_shader_col_format)
{
   unsigned i, cb_shader_mask = 0;

   for (i = 0; i < 8; i++) {
      switch ((spi_shader_col_format >> (i * 4)) & 0xf) {
      case V_028714_SPI_SHADER_ZERO:
         break;
      case V_028714_SPI_SHADER_32_R:
         cb_shader_mask |= 0x1 << (i * 4);
         break;
      case V_028714_SPI_SHADER_32_GR:
         cb_shader_mask |= 0x3 << (i * 4);
         break;
      case V_028714_SPI_SHADER_32_AR:
         cb_shader_mask |= 0x9u << (i * 4);
         break;
      case V_028714_SPI_SHADER_FP16_ABGR:
      case V_028714_SPI_SHADER_UNORM16_ABGR:
      case V_028714_SPI_SHADER_SNORM16_ABGR:
      case V_028714_SPI_SHADER_UINT16_ABGR:
      case V_028714_SPI_SHADER_SINT16_ABGR:
      case V_028714_SPI_SHADER_32_ABGR:
         cb_shader_mask |= 0xfu << (i * 4);
         break;
      default:
         assert(0);
      }
   }
   return cb_shader_mask;
}
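
/* Example (illustrative, follows directly from the switch above): with MRT0 exporting
 * SPI_SHADER_32_R and MRT1 exporting SPI_SHADER_FP16_ABGR, only the R channel of
 * target 0 and all four channels of target 1 are enabled:
 *
 *    cb_shader_mask = (0x1 << 0) | (0xf << 4) = 0xf1
 */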

/**
 * Calculate the appropriate setting of VGT_GS_MODE for a geometry shader,
 * based on its maximum number of output vertices.
 */
uint32_t ac_vgt_gs_mode(unsigned gs_max_vert_out, enum amd_gfx_level gfx_level)
{
   unsigned cut_mode;

   assert(gfx_level < GFX11);

   if (gs_max_vert_out <= 128) {
      cut_mode = V_028A40_GS_CUT_128;
   } else if (gs_max_vert_out <= 256) {
      cut_mode = V_028A40_GS_CUT_256;
   } else if (gs_max_vert_out <= 512) {
      cut_mode = V_028A40_GS_CUT_512;
   } else {
      assert(gs_max_vert_out <= 1024);
      cut_mode = V_028A40_GS_CUT_1024;
   }

   return S_028A40_MODE(V_028A40_GS_SCENARIO_G) | S_028A40_CUT_MODE(cut_mode) |
          S_028A40_ES_WRITE_OPTIMIZE(gfx_level <= GFX8) | S_028A40_GS_WRITE_OPTIMIZE(1) |
          S_028A40_ONCHIP(gfx_level >= GFX9 ? 1 : 0);
}

/// Translate a (dfmt, nfmt) pair into a chip-appropriate combined format
/// value for LLVM8+ tbuffer intrinsics.
unsigned ac_get_tbuffer_format(enum amd_gfx_level gfx_level, unsigned dfmt, unsigned nfmt)
{
   // Some games try to access vertex buffers without a valid format.
   // This is a game bug, but we should still handle it gracefully.
   if (dfmt == V_008F0C_GFX10_FORMAT_INVALID)
      return V_008F0C_GFX10_FORMAT_INVALID;

   if (gfx_level >= GFX11) {
      switch (dfmt) {
      default:
         unreachable("bad dfmt");
      case V_008F0C_BUF_DATA_FORMAT_INVALID:
         return V_008F0C_GFX11_FORMAT_INVALID;

      case V_008F0C_BUF_DATA_FORMAT_8:
         switch (nfmt) {
         case V_008F0C_BUF_NUM_FORMAT_UNORM:
            return V_008F0C_GFX11_FORMAT_8_UNORM;
         case V_008F0C_BUF_NUM_FORMAT_SNORM:
            return V_008F0C_GFX11_FORMAT_8_SNORM;
         case V_008F0C_BUF_NUM_FORMAT_USCALED:
            return V_008F0C_GFX11_FORMAT_8_USCALED;
         case V_008F0C_BUF_NUM_FORMAT_SSCALED:
            return V_008F0C_GFX11_FORMAT_8_SSCALED;
         default:
            unreachable("bad nfmt");
         case V_008F0C_BUF_NUM_FORMAT_UINT:
            return V_008F0C_GFX11_FORMAT_8_UINT;
         case V_008F0C_BUF_NUM_FORMAT_SINT:
            return V_008F0C_GFX11_FORMAT_8_SINT;
         }

      case V_008F0C_BUF_DATA_FORMAT_8_8:
         switch (nfmt) {
         case V_008F0C_BUF_NUM_FORMAT_UNORM:
            return V_008F0C_GFX11_FORMAT_8_8_UNORM;
         case V_008F0C_BUF_NUM_FORMAT_SNORM:
            return V_008F0C_GFX11_FORMAT_8_8_SNORM;
         case V_008F0C_BUF_NUM_FORMAT_USCALED:
            return V_008F0C_GFX11_FORMAT_8_8_USCALED;
         case V_008F0C_BUF_NUM_FORMAT_SSCALED:
            return V_008F0C_GFX11_FORMAT_8_8_SSCALED;
         default:
            unreachable("bad nfmt");
         case V_008F0C_BUF_NUM_FORMAT_UINT:
            return V_008F0C_GFX11_FORMAT_8_8_UINT;
         case V_008F0C_BUF_NUM_FORMAT_SINT:
            return V_008F0C_GFX11_FORMAT_8_8_SINT;
         }

      case V_008F0C_BUF_DATA_FORMAT_8_8_8_8:
         switch (nfmt) {
         case V_008F0C_BUF_NUM_FORMAT_UNORM:
            return V_008F0C_GFX11_FORMAT_8_8_8_8_UNORM;
         case V_008F0C_BUF_NUM_FORMAT_SNORM:
            return V_008F0C_GFX11_FORMAT_8_8_8_8_SNORM;
         case V_008F0C_BUF_NUM_FORMAT_USCALED:
            return V_008F0C_GFX11_FORMAT_8_8_8_8_USCALED;
         case V_008F0C_BUF_NUM_FORMAT_SSCALED:
            return V_008F0C_GFX11_FORMAT_8_8_8_8_SSCALED;
         default:
            unreachable("bad nfmt");
         case V_008F0C_BUF_NUM_FORMAT_UINT:
            return V_008F0C_GFX11_FORMAT_8_8_8_8_UINT;
         case V_008F0C_BUF_NUM_FORMAT_SINT:
            return V_008F0C_GFX11_FORMAT_8_8_8_8_SINT;
         }

      case V_008F0C_BUF_DATA_FORMAT_16:
         switch (nfmt) {
         case V_008F0C_BUF_NUM_FORMAT_UNORM:
            return V_008F0C_GFX11_FORMAT_16_UNORM;
         case V_008F0C_BUF_NUM_FORMAT_SNORM:
            return V_008F0C_GFX11_FORMAT_16_SNORM;
         case V_008F0C_BUF_NUM_FORMAT_USCALED:
            return V_008F0C_GFX11_FORMAT_16_USCALED;
         case V_008F0C_BUF_NUM_FORMAT_SSCALED:
            return V_008F0C_GFX11_FORMAT_16_SSCALED;
         default:
            unreachable("bad nfmt");
         case V_008F0C_BUF_NUM_FORMAT_UINT:
            return V_008F0C_GFX11_FORMAT_16_UINT;
         case V_008F0C_BUF_NUM_FORMAT_SINT:
            return V_008F0C_GFX11_FORMAT_16_SINT;
         case V_008F0C_BUF_NUM_FORMAT_FLOAT:
            return V_008F0C_GFX11_FORMAT_16_FLOAT;
         }

      case V_008F0C_BUF_DATA_FORMAT_16_16:
         switch (nfmt) {
         case V_008F0C_BUF_NUM_FORMAT_UNORM:
            return V_008F0C_GFX11_FORMAT_16_16_UNORM;
         case V_008F0C_BUF_NUM_FORMAT_SNORM:
            return V_008F0C_GFX11_FORMAT_16_16_SNORM;
         case V_008F0C_BUF_NUM_FORMAT_USCALED:
            return V_008F0C_GFX11_FORMAT_16_16_USCALED;
         case V_008F0C_BUF_NUM_FORMAT_SSCALED:
            return V_008F0C_GFX11_FORMAT_16_16_SSCALED;
         default:
            unreachable("bad nfmt");
         case V_008F0C_BUF_NUM_FORMAT_UINT:
            return V_008F0C_GFX11_FORMAT_16_16_UINT;
         case V_008F0C_BUF_NUM_FORMAT_SINT:
            return V_008F0C_GFX11_FORMAT_16_16_SINT;
         case V_008F0C_BUF_NUM_FORMAT_FLOAT:
            return V_008F0C_GFX11_FORMAT_16_16_FLOAT;
         }

      case V_008F0C_BUF_DATA_FORMAT_16_16_16_16:
         switch (nfmt) {
         case V_008F0C_BUF_NUM_FORMAT_UNORM:
            return V_008F0C_GFX11_FORMAT_16_16_16_16_UNORM;
         case V_008F0C_BUF_NUM_FORMAT_SNORM:
            return V_008F0C_GFX11_FORMAT_16_16_16_16_SNORM;
         case V_008F0C_BUF_NUM_FORMAT_USCALED:
            return V_008F0C_GFX11_FORMAT_16_16_16_16_USCALED;
         case V_008F0C_BUF_NUM_FORMAT_SSCALED:
            return V_008F0C_GFX11_FORMAT_16_16_16_16_SSCALED;
         default:
            unreachable("bad nfmt");
         case V_008F0C_BUF_NUM_FORMAT_UINT:
            return V_008F0C_GFX11_FORMAT_16_16_16_16_UINT;
         case V_008F0C_BUF_NUM_FORMAT_SINT:
            return V_008F0C_GFX11_FORMAT_16_16_16_16_SINT;
         case V_008F0C_BUF_NUM_FORMAT_FLOAT:
            return V_008F0C_GFX11_FORMAT_16_16_16_16_FLOAT;
         }

      case V_008F0C_BUF_DATA_FORMAT_32:
         switch (nfmt) {
         default:
            unreachable("bad nfmt");
         case V_008F0C_BUF_NUM_FORMAT_UINT:
            return V_008F0C_GFX11_FORMAT_32_UINT;
         case V_008F0C_BUF_NUM_FORMAT_SINT:
            return V_008F0C_GFX11_FORMAT_32_SINT;
         case V_008F0C_BUF_NUM_FORMAT_FLOAT:
            return V_008F0C_GFX11_FORMAT_32_FLOAT;
         }

      case V_008F0C_BUF_DATA_FORMAT_32_32:
         switch (nfmt) {
         default:
            unreachable("bad nfmt");
         case V_008F0C_BUF_NUM_FORMAT_UINT:
            return V_008F0C_GFX11_FORMAT_32_32_UINT;
         case V_008F0C_BUF_NUM_FORMAT_SINT:
            return V_008F0C_GFX11_FORMAT_32_32_SINT;
         case V_008F0C_BUF_NUM_FORMAT_FLOAT:
            return V_008F0C_GFX11_FORMAT_32_32_FLOAT;
         }

      case V_008F0C_BUF_DATA_FORMAT_32_32_32:
         switch (nfmt) {
         default:
            unreachable("bad nfmt");
         case V_008F0C_BUF_NUM_FORMAT_UINT:
            return V_008F0C_GFX11_FORMAT_32_32_32_UINT;
         case V_008F0C_BUF_NUM_FORMAT_SINT:
            return V_008F0C_GFX11_FORMAT_32_32_32_SINT;
         case V_008F0C_BUF_NUM_FORMAT_FLOAT:
            return V_008F0C_GFX11_FORMAT_32_32_32_FLOAT;
         }

      case V_008F0C_BUF_DATA_FORMAT_32_32_32_32:
         switch (nfmt) {
         default:
            unreachable("bad nfmt");
         case V_008F0C_BUF_NUM_FORMAT_UINT:
            return V_008F0C_GFX11_FORMAT_32_32_32_32_UINT;
         case V_008F0C_BUF_NUM_FORMAT_SINT:
            return V_008F0C_GFX11_FORMAT_32_32_32_32_SINT;
         case V_008F0C_BUF_NUM_FORMAT_FLOAT:
            return V_008F0C_GFX11_FORMAT_32_32_32_32_FLOAT;
         }

      case V_008F0C_BUF_DATA_FORMAT_2_10_10_10:
         switch (nfmt) {
         case V_008F0C_BUF_NUM_FORMAT_UNORM:
            return V_008F0C_GFX11_FORMAT_2_10_10_10_UNORM;
         case V_008F0C_BUF_NUM_FORMAT_SNORM:
            return V_008F0C_GFX11_FORMAT_2_10_10_10_SNORM;
         case V_008F0C_BUF_NUM_FORMAT_USCALED:
            return V_008F0C_GFX11_FORMAT_2_10_10_10_USCALED;
         case V_008F0C_BUF_NUM_FORMAT_SSCALED:
            return V_008F0C_GFX11_FORMAT_2_10_10_10_SSCALED;
         default:
            unreachable("bad nfmt");
         case V_008F0C_BUF_NUM_FORMAT_UINT:
            return V_008F0C_GFX11_FORMAT_2_10_10_10_UINT;
         case V_008F0C_BUF_NUM_FORMAT_SINT:
            return V_008F0C_GFX11_FORMAT_2_10_10_10_SINT;
         }

      case V_008F0C_BUF_DATA_FORMAT_10_11_11:
         switch (nfmt) {
         default:
            unreachable("bad nfmt");
         case V_008F0C_BUF_NUM_FORMAT_FLOAT:
            return V_008F0C_GFX11_FORMAT_10_11_11_FLOAT;
         }
      }
   } else if (gfx_level >= GFX10) {
      unsigned format;
      switch (dfmt) {
      default:
         unreachable("bad dfmt");
      case V_008F0C_BUF_DATA_FORMAT_INVALID:
         format = V_008F0C_GFX10_FORMAT_INVALID;
         break;
      case V_008F0C_BUF_DATA_FORMAT_8:
         format = V_008F0C_GFX10_FORMAT_8_UINT;
         break;
      case V_008F0C_BUF_DATA_FORMAT_8_8:
         format = V_008F0C_GFX10_FORMAT_8_8_UINT;
         break;
      case V_008F0C_BUF_DATA_FORMAT_8_8_8_8:
         format = V_008F0C_GFX10_FORMAT_8_8_8_8_UINT;
         break;
      case V_008F0C_BUF_DATA_FORMAT_16:
         format = V_008F0C_GFX10_FORMAT_16_UINT;
         break;
      case V_008F0C_BUF_DATA_FORMAT_16_16:
         format = V_008F0C_GFX10_FORMAT_16_16_UINT;
         break;
      case V_008F0C_BUF_DATA_FORMAT_16_16_16_16:
         format = V_008F0C_GFX10_FORMAT_16_16_16_16_UINT;
         break;
      case V_008F0C_BUF_DATA_FORMAT_32:
         format = V_008F0C_GFX10_FORMAT_32_UINT;
         break;
      case V_008F0C_BUF_DATA_FORMAT_32_32:
         format = V_008F0C_GFX10_FORMAT_32_32_UINT;
         break;
      case V_008F0C_BUF_DATA_FORMAT_32_32_32:
         format = V_008F0C_GFX10_FORMAT_32_32_32_UINT;
         break;
      case V_008F0C_BUF_DATA_FORMAT_32_32_32_32:
         format = V_008F0C_GFX10_FORMAT_32_32_32_32_UINT;
         break;
      case V_008F0C_BUF_DATA_FORMAT_2_10_10_10:
         format = V_008F0C_GFX10_FORMAT_2_10_10_10_UINT;
         break;
      case V_008F0C_BUF_DATA_FORMAT_10_11_11:
         format = V_008F0C_GFX10_FORMAT_10_11_11_UINT;
         break;
      }

      // Use the regularity properties of the combined format enum.
      //
      // Note: float is incompatible with 8-bit data formats,
      // [us]{norm,scaled} are incompatible with 32-bit data formats.
      // [us]scaled are not writable.
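      //
      // Worked example (relies only on the enum ordering used below): for
      // dfmt = BUF_DATA_FORMAT_16_16 and nfmt = BUF_NUM_FORMAT_FLOAT, the table above
      // selects GFX10_FORMAT_16_16_UINT and the "+ 2" for FLOAT yields
      // GFX10_FORMAT_16_16_FLOAT; likewise "- 4" for UNORM would yield
      // GFX10_FORMAT_16_16_UNORM.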
      switch (nfmt) {
      case V_008F0C_BUF_NUM_FORMAT_UNORM:
         format -= 4;
         break;
      case V_008F0C_BUF_NUM_FORMAT_SNORM:
         format -= 3;
         break;
      case V_008F0C_BUF_NUM_FORMAT_USCALED:
         format -= 2;
         break;
      case V_008F0C_BUF_NUM_FORMAT_SSCALED:
         format -= 1;
         break;
      default:
         unreachable("bad nfmt");
      case V_008F0C_BUF_NUM_FORMAT_UINT:
         break;
      case V_008F0C_BUF_NUM_FORMAT_SINT:
         format += 1;
         break;
      case V_008F0C_BUF_NUM_FORMAT_FLOAT:
         format += 2;
         break;
      }

      return format;
   } else {
      return dfmt | (nfmt << 4);
   }
}

static const struct ac_data_format_info data_format_table[] = {
   [V_008F0C_BUF_DATA_FORMAT_INVALID] = {0, 4, 0, V_008F0C_BUF_DATA_FORMAT_INVALID},
   [V_008F0C_BUF_DATA_FORMAT_8] = {1, 1, 1, V_008F0C_BUF_DATA_FORMAT_8},
   [V_008F0C_BUF_DATA_FORMAT_16] = {2, 1, 2, V_008F0C_BUF_DATA_FORMAT_16},
   [V_008F0C_BUF_DATA_FORMAT_8_8] = {2, 2, 1, V_008F0C_BUF_DATA_FORMAT_8},
   [V_008F0C_BUF_DATA_FORMAT_32] = {4, 1, 4, V_008F0C_BUF_DATA_FORMAT_32},
   [V_008F0C_BUF_DATA_FORMAT_16_16] = {4, 2, 2, V_008F0C_BUF_DATA_FORMAT_16},
   [V_008F0C_BUF_DATA_FORMAT_10_11_11] = {4, 3, 0, V_008F0C_BUF_DATA_FORMAT_10_11_11},
   [V_008F0C_BUF_DATA_FORMAT_11_11_10] = {4, 3, 0, V_008F0C_BUF_DATA_FORMAT_11_11_10},
   [V_008F0C_BUF_DATA_FORMAT_10_10_10_2] = {4, 4, 0, V_008F0C_BUF_DATA_FORMAT_10_10_10_2},
   [V_008F0C_BUF_DATA_FORMAT_2_10_10_10] = {4, 4, 0, V_008F0C_BUF_DATA_FORMAT_2_10_10_10},
   [V_008F0C_BUF_DATA_FORMAT_8_8_8_8] = {4, 4, 1, V_008F0C_BUF_DATA_FORMAT_8},
   [V_008F0C_BUF_DATA_FORMAT_32_32] = {8, 2, 4, V_008F0C_BUF_DATA_FORMAT_32},
   [V_008F0C_BUF_DATA_FORMAT_16_16_16_16] = {8, 4, 2, V_008F0C_BUF_DATA_FORMAT_16},
   [V_008F0C_BUF_DATA_FORMAT_32_32_32] = {12, 3, 4, V_008F0C_BUF_DATA_FORMAT_32},
   [V_008F0C_BUF_DATA_FORMAT_32_32_32_32] = {16, 4, 4, V_008F0C_BUF_DATA_FORMAT_32},
};

const struct ac_data_format_info *ac_get_data_format_info(unsigned dfmt)
{
   assert(dfmt < ARRAY_SIZE(data_format_table));
   return &data_format_table[dfmt];
}

enum ac_image_dim ac_get_sampler_dim(enum amd_gfx_level gfx_level, enum glsl_sampler_dim dim,
                                     bool is_array)
{
   switch (dim) {
   case GLSL_SAMPLER_DIM_1D:
      if (gfx_level == GFX9)
         return is_array ? ac_image_2darray : ac_image_2d;
      return is_array ? ac_image_1darray : ac_image_1d;
   case GLSL_SAMPLER_DIM_2D:
   case GLSL_SAMPLER_DIM_RECT:
   case GLSL_SAMPLER_DIM_EXTERNAL:
      return is_array ? ac_image_2darray : ac_image_2d;
   case GLSL_SAMPLER_DIM_3D:
      return ac_image_3d;
   case GLSL_SAMPLER_DIM_CUBE:
      return ac_image_cube;
   case GLSL_SAMPLER_DIM_MS:
      return is_array ? ac_image_2darraymsaa : ac_image_2dmsaa;
   case GLSL_SAMPLER_DIM_SUBPASS:
      return ac_image_2darray;
   case GLSL_SAMPLER_DIM_SUBPASS_MS:
      return ac_image_2darraymsaa;
   default:
      unreachable("bad sampler dim");
   }
}

enum ac_image_dim ac_get_image_dim(enum amd_gfx_level gfx_level, enum glsl_sampler_dim sdim,
                                   bool is_array)
{
   enum ac_image_dim dim = ac_get_sampler_dim(gfx_level, sdim, is_array);

   /* Match the resource type set in the descriptor. */
   if (dim == ac_image_cube || (gfx_level <= GFX8 && dim == ac_image_3d))
      dim = ac_image_2darray;
   else if (sdim == GLSL_SAMPLER_DIM_2D && !is_array && gfx_level == GFX9) {
      /* When a single layer of a 3D texture is bound, the shader
       * will refer to a 2D target, but the descriptor has a 3D type.
       * Since the HW ignores BASE_ARRAY in this case, we need to
       * send 3 coordinates. This doesn't hurt when the underlying
       * texture is non-3D.
       */
      dim = ac_image_3d;
   }

   return dim;
}

unsigned ac_get_fs_input_vgpr_cnt(const struct ac_shader_config *config,
                                  signed char *face_vgpr_index_ptr,
                                  signed char *ancillary_vgpr_index_ptr,
                                  signed char *sample_coverage_vgpr_index_ptr)
{
   unsigned num_input_vgprs = 0;
   signed char face_vgpr_index = -1;
   signed char ancillary_vgpr_index = -1;
   signed char sample_coverage_vgpr_index = -1;

   if (G_0286CC_PERSP_SAMPLE_ENA(config->spi_ps_input_addr))
      num_input_vgprs += 2;
   if (G_0286CC_PERSP_CENTER_ENA(config->spi_ps_input_addr))
      num_input_vgprs += 2;
   if (G_0286CC_PERSP_CENTROID_ENA(config->spi_ps_input_addr))
      num_input_vgprs += 2;
   if (G_0286CC_PERSP_PULL_MODEL_ENA(config->spi_ps_input_addr))
      num_input_vgprs += 3;
   if (G_0286CC_LINEAR_SAMPLE_ENA(config->spi_ps_input_addr))
      num_input_vgprs += 2;
   if (G_0286CC_LINEAR_CENTER_ENA(config->spi_ps_input_addr))
      num_input_vgprs += 2;
   if (G_0286CC_LINEAR_CENTROID_ENA(config->spi_ps_input_addr))
      num_input_vgprs += 2;
   if (G_0286CC_LINE_STIPPLE_TEX_ENA(config->spi_ps_input_addr))
      num_input_vgprs += 1;
   if (G_0286CC_POS_X_FLOAT_ENA(config->spi_ps_input_addr))
      num_input_vgprs += 1;
   if (G_0286CC_POS_Y_FLOAT_ENA(config->spi_ps_input_addr))
      num_input_vgprs += 1;
   if (G_0286CC_POS_Z_FLOAT_ENA(config->spi_ps_input_addr))
      num_input_vgprs += 1;
   if (G_0286CC_POS_W_FLOAT_ENA(config->spi_ps_input_addr))
      num_input_vgprs += 1;
   if (G_0286CC_FRONT_FACE_ENA(config->spi_ps_input_addr)) {
      face_vgpr_index = num_input_vgprs;
      num_input_vgprs += 1;
   }
   if (G_0286CC_ANCILLARY_ENA(config->spi_ps_input_addr)) {
      ancillary_vgpr_index = num_input_vgprs;
      num_input_vgprs += 1;
   }
   if (G_0286CC_SAMPLE_COVERAGE_ENA(config->spi_ps_input_addr)) {
      sample_coverage_vgpr_index = num_input_vgprs;
      num_input_vgprs += 1;
   }
   if (G_0286CC_POS_FIXED_PT_ENA(config->spi_ps_input_addr))
      num_input_vgprs += 1;

   if (face_vgpr_index_ptr)
      *face_vgpr_index_ptr = face_vgpr_index;
   if (ancillary_vgpr_index_ptr)
      *ancillary_vgpr_index_ptr = ancillary_vgpr_index;
   if (sample_coverage_vgpr_index_ptr)
      *sample_coverage_vgpr_index_ptr = sample_coverage_vgpr_index;

   return num_input_vgprs;
}
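
/* Illustrative example (derived from the counting above): a pixel shader whose
 * spi_ps_input_addr enables only PERSP_CENTER and FRONT_FACE gets 2 VGPRs for the
 * perspective-center I/J pair (indices 0-1) followed by the front-face VGPR, so the
 * function returns 3 and stores 2 in *face_vgpr_index_ptr.
 */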

void ac_choose_spi_color_formats(unsigned format, unsigned swap, unsigned ntype,
                                 bool is_depth, bool use_rbplus,
                                 struct ac_spi_color_formats *formats)
{
   /* Alpha is needed for alpha-to-coverage.
    * Blending may be with or without alpha.
    */
   unsigned normal = 0;      /* most optimal, may not support blending or export alpha */
   unsigned alpha = 0;       /* exports alpha, but may not support blending */
   unsigned blend = 0;       /* supports blending, but may not export alpha */
   unsigned blend_alpha = 0; /* least optimal, supports blending and exports alpha */

   /* Choose the SPI color formats. These are required values for RB+.
    * Other chips have multiple choices, though they are not necessarily better.
    */
   switch (format) {
   case V_028C70_COLOR_5_6_5:
   case V_028C70_COLOR_1_5_5_5:
   case V_028C70_COLOR_5_5_5_1:
   case V_028C70_COLOR_4_4_4_4:
   case V_028C70_COLOR_10_11_11:
   case V_028C70_COLOR_11_11_10:
   case V_028C70_COLOR_5_9_9_9:
   case V_028C70_COLOR_8:
   case V_028C70_COLOR_8_8:
   case V_028C70_COLOR_8_8_8_8:
   case V_028C70_COLOR_10_10_10_2:
   case V_028C70_COLOR_2_10_10_10:
      if (ntype == V_028C70_NUMBER_UINT)
         alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_UINT16_ABGR;
      else if (ntype == V_028C70_NUMBER_SINT)
         alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_SINT16_ABGR;
      else
         alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_FP16_ABGR;

      if (!use_rbplus && format == V_028C70_COLOR_8 &&
          ntype != V_028C70_NUMBER_SRGB && swap == V_028C70_SWAP_STD) /* R */ {
         /* When RB+ is enabled, R8_UNORM should use FP16_ABGR for 2x
          * exporting performance. Otherwise, use 32_R to remove useless
          * instructions needed for 16-bit compressed exports.
          */
         blend = normal = V_028714_SPI_SHADER_32_R;
      }
      break;

   case V_028C70_COLOR_16:
   case V_028C70_COLOR_16_16:
   case V_028C70_COLOR_16_16_16_16:
      if (ntype == V_028C70_NUMBER_UNORM || ntype == V_028C70_NUMBER_SNORM) {
         /* UNORM16 and SNORM16 don't support blending */
         if (ntype == V_028C70_NUMBER_UNORM)
            normal = alpha = V_028714_SPI_SHADER_UNORM16_ABGR;
         else
            normal = alpha = V_028714_SPI_SHADER_SNORM16_ABGR;

         /* Use 32 bits per channel for blending. */
         if (format == V_028C70_COLOR_16) {
            if (swap == V_028C70_SWAP_STD) { /* R */
               blend = V_028714_SPI_SHADER_32_R;
               blend_alpha = V_028714_SPI_SHADER_32_AR;
            } else if (swap == V_028C70_SWAP_ALT_REV) /* A */
               blend = blend_alpha = V_028714_SPI_SHADER_32_AR;
            else
               assert(0);
         } else if (format == V_028C70_COLOR_16_16) {
            if (swap == V_028C70_SWAP_STD || swap == V_028C70_SWAP_STD_REV) { /* RG or GR */
               blend = V_028714_SPI_SHADER_32_GR;
               blend_alpha = V_028714_SPI_SHADER_32_ABGR;
            } else if (swap == V_028C70_SWAP_ALT) /* RA */
               blend = blend_alpha = V_028714_SPI_SHADER_32_AR;
            else
               assert(0);
         } else /* 16_16_16_16 */
            blend = blend_alpha = V_028714_SPI_SHADER_32_ABGR;
      } else if (ntype == V_028C70_NUMBER_UINT)
         alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_UINT16_ABGR;
      else if (ntype == V_028C70_NUMBER_SINT)
         alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_SINT16_ABGR;
      else if (ntype == V_028C70_NUMBER_FLOAT)
         alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_FP16_ABGR;
      else
         assert(0);
      break;

   case V_028C70_COLOR_32:
      if (swap == V_028C70_SWAP_STD) { /* R */
         blend = normal = V_028714_SPI_SHADER_32_R;
         alpha = blend_alpha = V_028714_SPI_SHADER_32_AR;
      } else if (swap == V_028C70_SWAP_ALT_REV) /* A */
         alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_AR;
      else
         assert(0);
      break;

   case V_028C70_COLOR_32_32:
      if (swap == V_028C70_SWAP_STD || swap == V_028C70_SWAP_STD_REV) { /* RG or GR */
         blend = normal = V_028714_SPI_SHADER_32_GR;
         alpha = blend_alpha = V_028714_SPI_SHADER_32_ABGR;
      } else if (swap == V_028C70_SWAP_ALT) /* RA */
         alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_AR;
      else
         assert(0);
      break;

   case V_028C70_COLOR_32_32_32_32:
   case V_028C70_COLOR_8_24:
   case V_028C70_COLOR_24_8:
   case V_028C70_COLOR_X24_8_32_FLOAT:
      alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_ABGR;
      break;

   default:
      assert(0);
      return;
   }

   /* The DB->CB copy needs 32_ABGR. */
   if (is_depth)
      alpha = blend = blend_alpha = normal = V_028714_SPI_SHADER_32_ABGR;

   formats->normal = normal;
   formats->alpha = alpha;
   formats->blend = blend;
   formats->blend_alpha = blend_alpha;
}

void ac_compute_late_alloc(const struct radeon_info *info, bool ngg, bool ngg_culling,
                           bool uses_scratch, unsigned *late_alloc_wave64, unsigned *cu_mask)
{
   *late_alloc_wave64 = 0; /* The limit is per SA. */
   *cu_mask = 0xffff;

   /* CU masking can decrease performance and cause a hang with <= 2 CUs per SA. */
   if (info->min_good_cu_per_sa <= 2)
      return;

   /* If scratch is used with late alloc, the GPU could deadlock if PS uses scratch too. A more
    * complicated computation is needed to enable late alloc with scratch (see PAL).
    */
   if (uses_scratch)
      return;

   /* Late alloc is not used for NGG on Navi14 due to a hw bug. */
   if (ngg && info->family == CHIP_NAVI14)
      return;

   if (info->gfx_level >= GFX10) {
      /* For Wave32, the hw will launch twice the number of late alloc waves, so 1 == 2x wave32.
       * These limits are estimated because they are all safe but they vary in performance.
       */
      if (ngg_culling)
         *late_alloc_wave64 = info->min_good_cu_per_sa * 10;
      else
         *late_alloc_wave64 = info->min_good_cu_per_sa * 4;

      /* Limit LATE_ALLOC_GS to prevent a hang (hw bug) on gfx10. */
      if (info->gfx_level == GFX10 && ngg)
         *late_alloc_wave64 = MIN2(*late_alloc_wave64, 64);

      /* Gfx10: CU2 & CU3 must be disabled to prevent a hw deadlock.
       * Others: CU1 must be disabled to prevent a hw deadlock.
       *
       * The deadlock is caused by late alloc, which usually increases performance.
       */
      *cu_mask &= info->gfx_level == GFX10 ? ~BITFIELD_RANGE(2, 2) :
                                             ~BITFIELD_RANGE(1, 1);
   } else {
      if (info->min_good_cu_per_sa <= 4) {
         /* Too few available compute units per SA. Disallowing VS to run on one CU could hurt us
          * more than late VS allocation would help.
          *
          * 2 is the highest safe number that allows us to keep all CUs enabled.
          */
         *late_alloc_wave64 = 2;
      } else {
         /* This is a good initial value, allowing 1 late_alloc wave per SIMD on num_cu - 2.
          */
         *late_alloc_wave64 = (info->min_good_cu_per_sa - 2) * 4;
      }

      /* VS can't execute on one CU if the limit is > 2. */
      if (*late_alloc_wave64 > 2)
         *cu_mask = 0xfffe; /* 1 CU disabled */
   }

   /* Max number that fits into the register field. */
   if (ngg) /* GS */
      *late_alloc_wave64 = MIN2(*late_alloc_wave64, G_00B204_SPI_SHADER_LATE_ALLOC_GS_GFX10(~0u));
   else /* VS */
      *late_alloc_wave64 = MIN2(*late_alloc_wave64, G_00B11C_LIMIT(~0u));
}

unsigned ac_compute_cs_workgroup_size(uint16_t sizes[3], bool variable, unsigned max)
{
   if (variable)
      return max;

   return sizes[0] * sizes[1] * sizes[2];
}

unsigned ac_compute_lshs_workgroup_size(enum amd_gfx_level gfx_level, gl_shader_stage stage,
                                        unsigned tess_num_patches,
                                        unsigned tess_patch_in_vtx,
                                        unsigned tess_patch_out_vtx)
{
   /* When tessellation is used, API VS runs on HW LS, API TCS runs on HW HS.
    * These two HW stages are merged on GFX9+.
    */

   bool merged_shaders = gfx_level >= GFX9;
   unsigned ls_workgroup_size = tess_num_patches * tess_patch_in_vtx;
   unsigned hs_workgroup_size = tess_num_patches * tess_patch_out_vtx;

   if (merged_shaders)
      return MAX2(ls_workgroup_size, hs_workgroup_size);
   else if (stage == MESA_SHADER_VERTEX)
      return ls_workgroup_size;
   else if (stage == MESA_SHADER_TESS_CTRL)
      return hs_workgroup_size;
   else
      unreachable("invalid LSHS shader stage");
}
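
/* Worked example (illustrative numbers only): with tess_num_patches = 8, 3 vertices per
 * input patch and 4 per output patch, the LS group is 8 * 3 = 24 threads and the HS group
 * is 8 * 4 = 32. On GFX9+ (merged LS/HS) the workgroup size is MAX2(24, 32) = 32; on older
 * chips the VS (LS) stage returns 24 and the TCS (HS) stage returns 32.
 */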

unsigned ac_compute_esgs_workgroup_size(enum amd_gfx_level gfx_level, unsigned wave_size,
                                        unsigned es_verts, unsigned gs_inst_prims)
{
   /* ESGS may operate in workgroups if on-chip GS (LDS rings) are enabled.
    *
    * GFX6: Not possible in the HW.
    * GFX7-8 (unmerged): possible in the HW, but not implemented in Mesa.
    * GFX9+ (merged): implemented in Mesa.
    */

   if (gfx_level <= GFX8)
      return wave_size;

   unsigned workgroup_size = MAX2(es_verts, gs_inst_prims);
   return CLAMP(workgroup_size, 1, 256);
}

unsigned ac_compute_ngg_workgroup_size(unsigned es_verts, unsigned gs_inst_prims,
                                       unsigned max_vtx_out, unsigned prim_amp_factor)
{
   /* NGG always operates in workgroups.
    *
    * For API VS/TES/GS:
    * - 1 invocation per input vertex
    * - 1 invocation per input primitive
    *
    * The same invocation can process both an input vertex and primitive,
    * however 1 invocation can only output up to 1 vertex and 1 primitive.
    */

   unsigned max_vtx_in = es_verts < 256 ? es_verts : 3 * gs_inst_prims;
   unsigned max_prim_in = gs_inst_prims;
   unsigned max_prim_out = gs_inst_prims * prim_amp_factor;
   unsigned workgroup_size = MAX4(max_vtx_in, max_vtx_out, max_prim_in, max_prim_out);

   return CLAMP(workgroup_size, 1, 256);
}
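
/* Worked example (illustrative numbers only): with es_verts = 96, gs_inst_prims = 32,
 * max_vtx_out = 64 and prim_amp_factor = 2, the limits are 96 input vertices, 32 input
 * primitives, 64 output vertices and 32 * 2 = 64 output primitives, so the workgroup size
 * is MAX4(96, 64, 32, 64) = 96 (already within the 1..256 clamp).
 */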

void ac_set_reg_cu_en(void *cs, unsigned reg_offset, uint32_t value, uint32_t clear_mask,
                      unsigned value_shift, const struct radeon_info *info,
                      void set_sh_reg(void*, unsigned, uint32_t))
{
   /* Register field position and mask. */
   uint32_t cu_en_mask = ~clear_mask;
   unsigned cu_en_shift = ffs(cu_en_mask) - 1;
   /* The value being set. */
   uint32_t cu_en = (value & cu_en_mask) >> cu_en_shift;

   /* AND the field by spi_cu_en. */
   uint32_t spi_cu_en = info->spi_cu_en >> value_shift;
   uint32_t new_value = (value & ~cu_en_mask) |
                        (((cu_en & spi_cu_en) << cu_en_shift) & cu_en_mask);

   set_sh_reg(cs, reg_offset, new_value);
}
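
/* Sketch of the arithmetic with hypothetical values: if clear_mask == ~0xffffu (so the
 * CU_EN field occupies bits [15:0]), the field in value holds 0xffff, and
 * info->spi_cu_en >> value_shift == 0xfffd, then cu_en_shift is 0 and the field is
 * rewritten to 0xffff & 0xfffd = 0xfffd while every bit outside the field is preserved.
 */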

/* Return the register value and tune bytes_per_wave to increase scratch performance. */
void ac_get_scratch_tmpring_size(const struct radeon_info *info,
                                 unsigned bytes_per_wave, unsigned *max_seen_bytes_per_wave,
                                 uint32_t *tmpring_size)
{
   /* SPI_TMPRING_SIZE and COMPUTE_TMPRING_SIZE are essentially scratch buffer descriptors.
    * WAVES means NUM_RECORDS. WAVESIZE is the size of each element, meaning STRIDE.
    * Thus, WAVESIZE must be constant while the scratch buffer is being used by the GPU.
    *
    * If you want to increase WAVESIZE without waiting for idle, you need to allocate a new
    * scratch buffer and use it instead. This will result in multiple scratch buffers being
    * used at the same time, each with a different WAVESIZE.
    *
    * If you want to decrease WAVESIZE, you don't have to. There is no advantage in decreasing
    * WAVESIZE after it's been increased.
    *
    * Shaders with SCRATCH_EN=0 don't allocate scratch space.
    */
   const unsigned size_shift = info->gfx_level >= GFX11 ? 8 : 10;
   const unsigned min_size_per_wave = BITFIELD_BIT(size_shift);

   /* The LLVM shader backend should be reporting aligned scratch_sizes. */
   assert((bytes_per_wave & BITFIELD_MASK(size_shift)) == 0 &&
          "scratch size per wave should be aligned");

   /* Add 1 scratch item to make the number of items odd. This should improve scratch
    * performance by more randomly distributing scratch waves among memory channels.
    */
   if (bytes_per_wave)
      bytes_per_wave |= min_size_per_wave;

   *max_seen_bytes_per_wave = MAX2(*max_seen_bytes_per_wave, bytes_per_wave);

   unsigned max_scratch_waves = info->max_scratch_waves;
   if (info->gfx_level >= GFX11)
      max_scratch_waves /= info->num_se; /* WAVES is per SE */

   /* TODO: We could decrease WAVES to make the whole buffer fit into the infinity cache. */
   *tmpring_size = S_0286E8_WAVES(max_scratch_waves) |
                   S_0286E8_WAVESIZE(*max_seen_bytes_per_wave >> size_shift);
}
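
/* Worked example (illustrative numbers only): on GFX11, size_shift is 8, i.e. a 256-byte
 * granularity. A request of bytes_per_wave = 8192 becomes 8192 | 256 = 8448 after the
 * odd-item adjustment, so WAVESIZE is programmed as 8448 >> 8 = 33, assuming no larger
 * per-wave size has been seen before; WAVES is max_scratch_waves divided by num_se.
 */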