1 /*
2 * Copyright © 2011 Red Hat All Rights Reserved.
3 * Copyright © 2017 Advanced Micro Devices, Inc.
4 *
5 * SPDX-License-Identifier: MIT
6 */
7
8 #define AC_SURFACE_INCLUDE_NIR
9 #include "ac_surface.h"
10
11 #include "ac_drm_fourcc.h"
12 #include "ac_gpu_info.h"
13 #include "addrlib/inc/addrinterface.h"
14 #include "addrlib/src/amdgpu_asic_addr.h"
15 #include "amd_family.h"
16 #include "sid.h"
17 #include "util/hash_table.h"
18 #include "util/macros.h"
19 #include "util/simple_mtx.h"
20 #include "util/u_atomic.h"
21 #include "util/format/u_format.h"
22 #include "util/u_math.h"
23 #include "util/u_memory.h"
24
25 #include <errno.h>
26 #include <stdio.h>
27 #include <stdlib.h>
28
29 #ifdef _WIN32
30 #define AMDGPU_TILING_ARRAY_MODE_SHIFT 0
31 #define AMDGPU_TILING_ARRAY_MODE_MASK 0xf
32 #define AMDGPU_TILING_PIPE_CONFIG_SHIFT 4
33 #define AMDGPU_TILING_PIPE_CONFIG_MASK 0x1f
34 #define AMDGPU_TILING_TILE_SPLIT_SHIFT 9
35 #define AMDGPU_TILING_TILE_SPLIT_MASK 0x7
36 #define AMDGPU_TILING_MICRO_TILE_MODE_SHIFT 12
37 #define AMDGPU_TILING_MICRO_TILE_MODE_MASK 0x7
38 #define AMDGPU_TILING_BANK_WIDTH_SHIFT 15
39 #define AMDGPU_TILING_BANK_WIDTH_MASK 0x3
40 #define AMDGPU_TILING_BANK_HEIGHT_SHIFT 17
41 #define AMDGPU_TILING_BANK_HEIGHT_MASK 0x3
42 #define AMDGPU_TILING_MACRO_TILE_ASPECT_SHIFT 19
43 #define AMDGPU_TILING_MACRO_TILE_ASPECT_MASK 0x3
44 #define AMDGPU_TILING_NUM_BANKS_SHIFT 21
45 #define AMDGPU_TILING_NUM_BANKS_MASK 0x3
46 #define AMDGPU_TILING_SWIZZLE_MODE_SHIFT 0
47 #define AMDGPU_TILING_SWIZZLE_MODE_MASK 0x1f
48 #define AMDGPU_TILING_DCC_OFFSET_256B_SHIFT 5
49 #define AMDGPU_TILING_DCC_OFFSET_256B_MASK 0xFFFFFF
50 #define AMDGPU_TILING_DCC_PITCH_MAX_SHIFT 29
51 #define AMDGPU_TILING_DCC_PITCH_MAX_MASK 0x3FFF
52 #define AMDGPU_TILING_DCC_INDEPENDENT_64B_SHIFT 43
53 #define AMDGPU_TILING_DCC_INDEPENDENT_64B_MASK 0x1
54 #define AMDGPU_TILING_DCC_INDEPENDENT_128B_SHIFT 44
55 #define AMDGPU_TILING_DCC_INDEPENDENT_128B_MASK 0x1
56 #define AMDGPU_TILING_SCANOUT_SHIFT 63
57 #define AMDGPU_TILING_SCANOUT_MASK 0x1
58 #define AMDGPU_TILING_SET(field, value) \
59 (((__u64)(value) & AMDGPU_TILING_##field##_MASK) << AMDGPU_TILING_##field##_SHIFT)
60 #define AMDGPU_TILING_GET(value, field) \
61 (((__u64)(value) >> AMDGPU_TILING_##field##_SHIFT) & AMDGPU_TILING_##field##_MASK)
62 #else
63 #include "drm-uapi/amdgpu_drm.h"
64 #endif
65
66 #ifndef CIASICIDGFXENGINE_SOUTHERNISLAND
67 #define CIASICIDGFXENGINE_SOUTHERNISLAND 0x0000000A
68 #endif
69
70 #ifndef CIASICIDGFXENGINE_ARCTICISLAND
71 #define CIASICIDGFXENGINE_ARCTICISLAND 0x0000000D
72 #endif
73
74 struct ac_addrlib {
75 ADDR_HANDLE handle;
76 simple_mtx_t lock;
77 };
78
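/* Translate a PIPE_CONFIG field value into the number of pipes it describes. */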
79 unsigned ac_pipe_config_to_num_pipes(unsigned pipe_config)
80 {
81 switch (pipe_config) {
82 case V_009910_ADDR_SURF_P2:
83 return 2;
84 case V_009910_ADDR_SURF_P4_8x16:
85 case V_009910_ADDR_SURF_P4_16x16:
86 case V_009910_ADDR_SURF_P4_16x32:
87 case V_009910_ADDR_SURF_P4_32x32:
88 return 4;
89 case V_009910_ADDR_SURF_P8_16x16_8x16:
90 case V_009910_ADDR_SURF_P8_16x32_8x16:
91 case V_009910_ADDR_SURF_P8_32x32_8x16:
92 case V_009910_ADDR_SURF_P8_16x32_16x16:
93 case V_009910_ADDR_SURF_P8_32x32_16x16:
94 case V_009910_ADDR_SURF_P8_32x32_16x32:
95 case V_009910_ADDR_SURF_P8_32x64_32x32:
96 return 8;
97 case V_009910_ADDR_SURF_P16_32x32_8x16:
98 case V_009910_ADDR_SURF_P16_32x32_16x16:
99 return 16;
100 default:
101 unreachable("invalid pipe_config");
102 }
103 }
104
105 bool ac_modifier_has_dcc(uint64_t modifier)
106 {
107 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
108 }
109
110 bool ac_modifier_has_dcc_retile(uint64_t modifier)
111 {
112 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC_RETILE, modifier);
113 }
114
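/* Return whether the DCC settings encoded in a modifier allow shader image stores.
 * See ac_surface_supports_dcc_image_stores below for the full list of combinations.
 */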
115 bool ac_modifier_supports_dcc_image_stores(enum amd_gfx_level gfx_level, uint64_t modifier)
116 {
117 if (!ac_modifier_has_dcc(modifier))
118 return false;
119
120 return (!AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier) &&
121 AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier) &&
122 AMD_FMT_MOD_GET(DCC_MAX_COMPRESSED_BLOCK, modifier) == AMD_FMT_MOD_DCC_BLOCK_128B) ||
123 (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS && /* gfx10.3 */
124 AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier) &&
125 AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier) &&
126 AMD_FMT_MOD_GET(DCC_MAX_COMPRESSED_BLOCK, modifier) == AMD_FMT_MOD_DCC_BLOCK_64B) ||
127 (gfx_level >= GFX11_5 &&
128 AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX11 &&
129 !AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier) &&
130 AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier) &&
131 AMD_FMT_MOD_GET(DCC_MAX_COMPRESSED_BLOCK, modifier) == AMD_FMT_MOD_DCC_BLOCK_256B);
132
133 }
134
135
136 bool ac_surface_supports_dcc_image_stores(enum amd_gfx_level gfx_level,
137 const struct radeon_surf *surf)
138 {
139 /* DCC image stores are only available on GFX10+. */
140 if (gfx_level < GFX10)
141 return false;
142
143 /* DCC image stores support the following settings:
144 * - INDEPENDENT_64B_BLOCKS = 0
145 * - INDEPENDENT_128B_BLOCKS = 1
146 * - MAX_COMPRESSED_BLOCK_SIZE = 128B
147 * - MAX_UNCOMPRESSED_BLOCK_SIZE = 256B (always used)
148 *
149 * gfx10.3 also supports the following setting:
150 * - INDEPENDENT_64B_BLOCKS = 1
151 * - INDEPENDENT_128B_BLOCKS = 1
152 * - MAX_COMPRESSED_BLOCK_SIZE = 64B
153 * - MAX_UNCOMPRESSED_BLOCK_SIZE = 256B (always used)
154 *
155 * gfx11.5 also supports the following:
156 * - INDEPENDENT_64B_BLOCKS = 0
157 * - INDEPENDENT_128B_BLOCKS = 1
158 * - MAX_COMPRESSED_BLOCK_SIZE = 256B
159 * - MAX_UNCOMPRESSED_BLOCK_SIZE = 256B (always used)
160 *
161 * The compressor only looks at MAX_COMPRESSED_BLOCK_SIZE to determine
162 * the INDEPENDENT_xx_BLOCKS settings. 128B implies INDEP_128B, while 64B
163 * implies INDEP_64B && INDEP_128B.
164 *
165 * The same limitations apply to SDMA compressed stores because
166 * SDMA uses the same DCC codec.
167 */
168 return (!surf->u.gfx9.color.dcc.independent_64B_blocks &&
169 surf->u.gfx9.color.dcc.independent_128B_blocks &&
170 surf->u.gfx9.color.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_128B) ||
171 (gfx_level >= GFX10_3 && /* gfx10.3 - old 64B compression */
172 surf->u.gfx9.color.dcc.independent_64B_blocks &&
173 surf->u.gfx9.color.dcc.independent_128B_blocks &&
174 surf->u.gfx9.color.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_64B) ||
175 (gfx_level >= GFX11_5 && /* gfx11.5 - new 256B compression */
176 !surf->u.gfx9.color.dcc.independent_64B_blocks &&
177 surf->u.gfx9.color.dcc.independent_128B_blocks &&
178 surf->u.gfx9.color.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_256B);
179 }
180
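/* Return the addrlib swizzle mode encoded in a modifier (linear has no TILE field). */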
181 static unsigned ac_get_modifier_swizzle_mode(enum amd_gfx_level gfx_level, uint64_t modifier)
182 {
183 if (modifier == DRM_FORMAT_MOD_LINEAR)
184 return ADDR_SW_LINEAR;
185
186 return AMD_FMT_MOD_GET(TILE, modifier);
187 }
188
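/* Derive the DCC parameters (pipe/RB alignment, independent block sizes and
 * max compressed block size) that a modifier implies.
 */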
189 static void
190 ac_modifier_fill_dcc_params(uint64_t modifier, struct radeon_surf *surf,
191 ADDR2_COMPUTE_SURFACE_INFO_INPUT *surf_info)
192 {
193 assert(ac_modifier_has_dcc(modifier));
194
195 if (AMD_FMT_MOD_GET(DCC_RETILE, modifier)) {
196 surf_info->flags.metaPipeUnaligned = 0;
197 } else {
198 surf_info->flags.metaPipeUnaligned = !AMD_FMT_MOD_GET(DCC_PIPE_ALIGN, modifier);
199 }
200
201 /* The metaPipeUnaligned setting is not strictly necessary, but it ensures we don't set
202 * metaRbUnaligned on non-displayable DCC surfaces just because num_render_backends == 1 */
203 surf_info->flags.metaRbUnaligned = AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
204 AMD_FMT_MOD_GET(RB, modifier) == 0 &&
205 surf_info->flags.metaPipeUnaligned;
206
207 surf->u.gfx9.color.dcc.independent_64B_blocks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
208 surf->u.gfx9.color.dcc.independent_128B_blocks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
209 surf->u.gfx9.color.dcc.max_compressed_block_size = AMD_FMT_MOD_GET(DCC_MAX_COMPRESSED_BLOCK, modifier);
210 }
211
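/* Return whether images with the given format can be created with this modifier. */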
212 bool ac_is_modifier_supported(const struct radeon_info *info,
213 const struct ac_modifier_options *options,
214 enum pipe_format format,
215 uint64_t modifier)
216 {
217
218 if (util_format_is_compressed(format) ||
219 util_format_is_depth_or_stencil(format) ||
220 util_format_get_blocksizebits(format) > 64)
221 return false;
222
223 if (info->gfx_level < GFX9)
224 return false;
225
226 if (modifier == DRM_FORMAT_MOD_LINEAR)
227 return true;
228
229 /* GFX8 may need a different modifier for each plane */
230 if (info->gfx_level < GFX9 && util_format_get_num_planes(format) > 1)
231 return false;
232
233 uint32_t allowed_swizzles = 0xFFFFFFFF;
234 switch (info->gfx_level) {
235 case GFX9:
236 allowed_swizzles = ac_modifier_has_dcc(modifier) ? 0x06000000 : 0x06660660;
237 break;
238 case GFX10:
239 case GFX10_3:
240 allowed_swizzles = ac_modifier_has_dcc(modifier) ? 0x08000000 : 0x0E660660;
241 break;
242 case GFX11:
243 case GFX11_5:
244 allowed_swizzles = ac_modifier_has_dcc(modifier) ? 0x88000000 : 0xCC440440;
245 break;
246 default:
247 return false;
248 }
249
250 if (!((1u << ac_get_modifier_swizzle_mode(info->gfx_level, modifier)) & allowed_swizzles))
251 return false;
252
253 if (ac_modifier_has_dcc(modifier)) {
254 /* TODO: support multi-planar formats with DCC */
255 if (util_format_get_num_planes(format) > 1)
256 return false;
257
258 if (!info->has_graphics)
259 return false;
260
261 if (!options->dcc)
262 return false;
263
264 if (ac_modifier_has_dcc_retile(modifier) &&
265 (!info->use_display_dcc_with_retile_blit || !options->dcc_retile))
266 return false;
267 }
268
269 return true;
270 }
271
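/* Enumerate the supported modifiers for a format, best first. If "mods" is NULL, only
 * the number of supported modifiers is returned in "mod_count"; otherwise up to
 * "mod_count" modifiers are written and the return value indicates whether the list
 * is complete.
 */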
272 bool ac_get_supported_modifiers(const struct radeon_info *info,
273 const struct ac_modifier_options *options,
274 enum pipe_format format,
275 unsigned *mod_count,
276 uint64_t *mods)
277 {
278 unsigned current_mod = 0;
279
280 #define ADD_MOD(name) \
281 if (ac_is_modifier_supported(info, options, format, (name))) { \
282 if (mods && current_mod < *mod_count) \
283 mods[current_mod] = (name); \
284 ++current_mod; \
285 }
286
287 /* The modifiers have to be added in descending order of estimated
288 * performance. The drivers will prefer modifiers that come earlier
289 * in the list. */
290 switch (info->gfx_level) {
291 case GFX9: {
292 unsigned pipe_xor_bits = MIN2(G_0098F8_NUM_PIPES(info->gb_addr_config) +
293 G_0098F8_NUM_SHADER_ENGINES_GFX9(info->gb_addr_config), 8);
294 unsigned bank_xor_bits = MIN2(G_0098F8_NUM_BANKS(info->gb_addr_config), 8 - pipe_xor_bits);
295 unsigned pipes = G_0098F8_NUM_PIPES(info->gb_addr_config);
296 unsigned rb = G_0098F8_NUM_RB_PER_SE(info->gb_addr_config) +
297 G_0098F8_NUM_SHADER_ENGINES_GFX9(info->gb_addr_config);
298
299 uint64_t common_dcc = AMD_FMT_MOD_SET(DCC, 1) |
300 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
301 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
302 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, info->has_dcc_constant_encode) |
303 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
304 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits);
305
306 ADD_MOD(AMD_FMT_MOD |
307 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
308 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
309 AMD_FMT_MOD_SET(DCC_PIPE_ALIGN, 1) |
310 common_dcc |
311 AMD_FMT_MOD_SET(PIPE, pipes) |
312 AMD_FMT_MOD_SET(RB, rb))
313
314 ADD_MOD(AMD_FMT_MOD |
315 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
316 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
317 AMD_FMT_MOD_SET(DCC_PIPE_ALIGN, 1) |
318 common_dcc |
319 AMD_FMT_MOD_SET(PIPE, pipes) |
320 AMD_FMT_MOD_SET(RB, rb))
321
322 if (util_format_get_blocksizebits(format) == 32) {
323 if (info->max_render_backends == 1) {
324 ADD_MOD(AMD_FMT_MOD |
325 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
326 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
327 common_dcc);
328 }
329
330
331 ADD_MOD(AMD_FMT_MOD |
332 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
333 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
334 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
335 common_dcc |
336 AMD_FMT_MOD_SET(PIPE, pipes) |
337 AMD_FMT_MOD_SET(RB, rb))
338 }
339
340
341 ADD_MOD(AMD_FMT_MOD |
342 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
343 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
344 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
345 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
346
347 ADD_MOD(AMD_FMT_MOD |
348 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
349 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
350 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
351 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
352
353 ADD_MOD(AMD_FMT_MOD |
354 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
355 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
356
357 ADD_MOD(AMD_FMT_MOD |
358 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
359 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
360
361 ADD_MOD(DRM_FORMAT_MOD_LINEAR)
362 break;
363 }
364 case GFX10:
365 case GFX10_3: {
366 bool rbplus = info->gfx_level >= GFX10_3;
367 unsigned pipe_xor_bits = G_0098F8_NUM_PIPES(info->gb_addr_config);
368 unsigned pkrs = rbplus ? G_0098F8_NUM_PKRS(info->gb_addr_config) : 0;
369
370 unsigned version = rbplus ? AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS : AMD_FMT_MOD_TILE_VER_GFX10;
371 uint64_t common_dcc = AMD_FMT_MOD_SET(TILE_VERSION, version) |
372 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
373 AMD_FMT_MOD_SET(DCC, 1) |
374 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
375 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
376 AMD_FMT_MOD_SET(PACKERS, pkrs);
377
378 ADD_MOD(AMD_FMT_MOD | common_dcc |
379 AMD_FMT_MOD_SET(DCC_PIPE_ALIGN, 1) |
380 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
381 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B))
382
383 if (info->gfx_level >= GFX10_3) {
384 ADD_MOD(AMD_FMT_MOD | common_dcc |
385 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
386 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
387 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B))
388
389 ADD_MOD(AMD_FMT_MOD | common_dcc |
390 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
391 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
392 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
393 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B))
394 }
395
396 ADD_MOD(AMD_FMT_MOD |
397 AMD_FMT_MOD_SET(TILE_VERSION, version) |
398 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
399 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
400 AMD_FMT_MOD_SET(PACKERS, pkrs))
401
402 ADD_MOD(AMD_FMT_MOD |
403 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
404 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
405 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits))
406
407 if (util_format_get_blocksizebits(format) != 32) {
408 ADD_MOD(AMD_FMT_MOD |
409 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
410 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
411 }
412
413 ADD_MOD(AMD_FMT_MOD |
414 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
415 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
416
417 ADD_MOD(DRM_FORMAT_MOD_LINEAR)
418 break;
419 }
420 case GFX11:
421 case GFX11_5: {
422 /* GFX11 has new microblock organization. No S modes for 2D. */
423 unsigned pipe_xor_bits = G_0098F8_NUM_PIPES(info->gb_addr_config);
424 unsigned pkrs = G_0098F8_NUM_PKRS(info->gb_addr_config);
425 unsigned num_pipes = 1 << pipe_xor_bits;
426
427 /* R_X swizzle modes are the best for rendering and DCC requires them. */
428 for (unsigned i = 0; i < 2; i++) {
429 unsigned swizzle_r_x;
430
431 /* Insert the best one first. */
432 if (num_pipes > 16)
433 swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX11_256K_R_X : AMD_FMT_MOD_TILE_GFX9_64K_R_X;
434 else
435 swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX9_64K_R_X : AMD_FMT_MOD_TILE_GFX11_256K_R_X;
436
437 /* Disable 256K on APUs because it doesn't work with DAL. */
438 if (!info->has_dedicated_vram && swizzle_r_x == AMD_FMT_MOD_TILE_GFX11_256K_R_X)
439 continue;
440
441 uint64_t modifier_r_x = AMD_FMT_MOD |
442 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
443 AMD_FMT_MOD_SET(TILE, swizzle_r_x) |
444 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
445 AMD_FMT_MOD_SET(PACKERS, pkrs);
446
447 /* DCC_CONSTANT_ENCODE is not set because it can't vary with gfx11 (it's implied to be 1). */
448 uint64_t modifier_dcc_best_gfx11_5 = modifier_r_x |
449 AMD_FMT_MOD_SET(DCC, 1) |
450 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) |
451 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
452 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_256B);
453
454 uint64_t modifier_dcc_best = modifier_r_x |
455 AMD_FMT_MOD_SET(DCC, 1) |
456 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) |
457 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
458 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B);
459
460 /* DCC settings for 4K and greater resolutions. (required by display hw) */
461 uint64_t modifier_dcc_4k = modifier_r_x |
462 AMD_FMT_MOD_SET(DCC, 1) |
463 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
464 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
465 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B);
466
467 /* Modifiers have to be sorted from best to worst.
468 *
469 * Top level order:
470 * 1. The best chip-specific modifiers with DCC, potentially non-displayable.
471 * 2. Chip-specific displayable modifiers with DCC.
472 * 3. Chip-specific displayable modifiers without DCC.
473 * 4. Chip-independent modifiers without DCC.
474 * 5. Linear.
475 */
476
477 /* Add the best non-displayable modifier first. */
478 if (info->gfx_level == GFX11_5)
479 ADD_MOD(modifier_dcc_best_gfx11_5 | AMD_FMT_MOD_SET(DCC_PIPE_ALIGN, 1));
480
481 ADD_MOD(modifier_dcc_best | AMD_FMT_MOD_SET(DCC_PIPE_ALIGN, 1));
482
483 /* Displayable modifiers are next. */
484 /* Add other displayable DCC settings. (DCC_RETILE implies displayable on all chips) */
485 ADD_MOD(modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1))
486 ADD_MOD(modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1))
487
488 /* Add one without DCC that is displayable (it's also optimal for non-displayable cases). */
489 ADD_MOD(modifier_r_x)
490 }
491
492 /* Add one that is compatible with other gfx11 chips. */
493 ADD_MOD(AMD_FMT_MOD |
494 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
495 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D))
496
497 /* Linear must be last. */
498 ADD_MOD(DRM_FORMAT_MOD_LINEAR)
499 break;
500 }
501 default:
502 break;
503 }
504
505 #undef ADD_MOD
506
507 if (!mods) {
508 *mod_count = current_mod;
509 return true;
510 }
511
512 bool complete = current_mod <= *mod_count;
513 *mod_count = MIN2(*mod_count, current_mod);
514 return complete;
515 }
516
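/* System memory allocation callbacks required by addrlib. */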
517 static void *ADDR_API allocSysMem(const ADDR_ALLOCSYSMEM_INPUT *pInput)
518 {
519 return malloc(pInput->sizeInBytes);
520 }
521
522 static ADDR_E_RETURNCODE ADDR_API freeSysMem(const ADDR_FREESYSMEM_INPUT *pInput)
523 {
524 free(pInput->pVirtAddr);
525 return ADDR_OK;
526 }
527
528 struct ac_addrlib *ac_addrlib_create(const struct radeon_info *info,
529 uint64_t *max_alignment)
530 {
531 ADDR_CREATE_INPUT addrCreateInput = {0};
532 ADDR_CREATE_OUTPUT addrCreateOutput = {0};
533 ADDR_REGISTER_VALUE regValue = {0};
534 ADDR_CREATE_FLAGS createFlags = {{0}};
535 ADDR_GET_MAX_ALIGNMENTS_OUTPUT addrGetMaxAlignmentsOutput = {0};
536 ADDR_E_RETURNCODE addrRet;
537
538 addrCreateInput.size = sizeof(ADDR_CREATE_INPUT);
539 addrCreateOutput.size = sizeof(ADDR_CREATE_OUTPUT);
540
541 regValue.gbAddrConfig = info->gb_addr_config;
542 createFlags.value = 0;
543
544 addrCreateInput.chipFamily = info->family_id;
545 addrCreateInput.chipRevision = info->chip_external_rev;
546
547 if (addrCreateInput.chipFamily == FAMILY_UNKNOWN)
548 return NULL;
549
550 if (addrCreateInput.chipFamily >= FAMILY_AI) {
551 addrCreateInput.chipEngine = CIASICIDGFXENGINE_ARCTICISLAND;
552 } else {
553 regValue.noOfBanks = info->mc_arb_ramcfg & 0x3;
554 regValue.noOfRanks = (info->mc_arb_ramcfg & 0x4) >> 2;
555
556 regValue.backendDisables = info->enabled_rb_mask;
557 regValue.pTileConfig = info->si_tile_mode_array;
558 regValue.noOfEntries = ARRAY_SIZE(info->si_tile_mode_array);
559 if (addrCreateInput.chipFamily == FAMILY_SI) {
560 regValue.pMacroTileConfig = NULL;
561 regValue.noOfMacroEntries = 0;
562 } else {
563 regValue.pMacroTileConfig = info->cik_macrotile_mode_array;
564 regValue.noOfMacroEntries = ARRAY_SIZE(info->cik_macrotile_mode_array);
565 }
566
567 createFlags.useTileIndex = 1;
568 createFlags.useHtileSliceAlign = 1;
569
570 addrCreateInput.chipEngine = CIASICIDGFXENGINE_SOUTHERNISLAND;
571 }
572
573 addrCreateInput.callbacks.allocSysMem = allocSysMem;
574 addrCreateInput.callbacks.freeSysMem = freeSysMem;
575 addrCreateInput.callbacks.debugPrint = 0;
576 addrCreateInput.createFlags = createFlags;
577 addrCreateInput.regValue = regValue;
578
579 addrRet = AddrCreate(&addrCreateInput, &addrCreateOutput);
580 if (addrRet != ADDR_OK)
581 return NULL;
582
583 if (max_alignment) {
584 addrRet = AddrGetMaxAlignments(addrCreateOutput.hLib, &addrGetMaxAlignmentsOutput);
585 if (addrRet == ADDR_OK) {
586 *max_alignment = addrGetMaxAlignmentsOutput.baseAlign;
587 }
588 }
589
590 struct ac_addrlib *addrlib = calloc(1, sizeof(struct ac_addrlib));
591 if (!addrlib) {
592 AddrDestroy(addrCreateOutput.hLib);
593 return NULL;
594 }
595
596 addrlib->handle = addrCreateOutput.hLib;
597 simple_mtx_init(&addrlib->lock, mtx_plain);
598 return addrlib;
599 }
600
601 void ac_addrlib_destroy(struct ac_addrlib *addrlib)
602 {
603 simple_mtx_destroy(&addrlib->lock);
604 AddrDestroy(addrlib->handle);
605 free(addrlib);
606 }
607
608 void *ac_addrlib_get_handle(struct ac_addrlib *addrlib)
609 {
610 return addrlib->handle;
611 }
612
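/* Validate the surface config against what addrlib can handle; returns 0 or -EINVAL. */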
613 static int surf_config_sanity(const struct ac_surf_config *config, unsigned flags)
614 {
615 /* FMASK is allocated together with the color surface and can't be
616 * allocated separately.
617 */
618 assert(!(flags & RADEON_SURF_FMASK));
619 if (flags & RADEON_SURF_FMASK)
620 return -EINVAL;
621
622 /* all dimensions must be at least 1! */
623 if (!config->info.width || !config->info.height || !config->info.depth ||
624 !config->info.array_size || !config->info.levels)
625 return -EINVAL;
626
627 switch (config->info.samples) {
628 case 0:
629 case 1:
630 case 2:
631 case 4:
632 case 8:
633 break;
634 case 16:
635 if (flags & RADEON_SURF_Z_OR_SBUFFER)
636 return -EINVAL;
637 break;
638 default:
639 return -EINVAL;
640 }
641
642 if (!(flags & RADEON_SURF_Z_OR_SBUFFER)) {
643 switch (config->info.storage_samples) {
644 case 0:
645 case 1:
646 case 2:
647 case 4:
648 case 8:
649 break;
650 default:
651 return -EINVAL;
652 }
653 }
654
655 if (config->is_3d && config->info.array_size > 1)
656 return -EINVAL;
657 if (config->is_cube && config->info.depth > 1)
658 return -EINVAL;
659
660 return 0;
661 }
662
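/* Map the block dimensions and bytes per element to an addrlib ADDR_FMT_* value. */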
663 static unsigned bpe_to_format(struct radeon_surf *surf)
664 {
665 if (surf->blk_w != 1 || surf->blk_h != 1) {
666 if (surf->blk_w == 4 && surf->blk_h == 4) {
667 switch (surf->bpe) {
668 case 8:
669 return ADDR_FMT_BC1;
670 case 16:
671 /* BC3 and ASTC 4x4 have the same block dimensions and bpe, so BC3 is also reported
672 * for ASTC 4x4. The matching is fine because addrlib only needs blk_w, blk_h and bpe
673 * to compute surface properties.
674 * TODO: If compress_type could be passed to this function, this ugly BC3/ASTC 4x4
675 * aliasing could be avoided.
676 */
677 return ADDR_FMT_BC3;
678 default:
679 unreachable("invalid compressed bpe");
680 }
681 } else if (surf->blk_w == 5 && surf->blk_h == 4)
682 return ADDR_FMT_ASTC_5x4;
683 else if (surf->blk_w == 5 && surf->blk_h == 5)
684 return ADDR_FMT_ASTC_5x5;
685 else if (surf->blk_w == 6 && surf->blk_h == 5)
686 return ADDR_FMT_ASTC_6x5;
687 else if (surf->blk_w == 6 && surf->blk_h == 6)
688 return ADDR_FMT_ASTC_6x6;
689 else if (surf->blk_w == 8 && surf->blk_h == 5)
690 return ADDR_FMT_ASTC_8x5;
691 else if (surf->blk_w == 8 && surf->blk_h == 6)
692 return ADDR_FMT_ASTC_8x6;
693 else if (surf->blk_w == 8 && surf->blk_h == 8)
694 return ADDR_FMT_ASTC_8x8;
695 else if (surf->blk_w == 10 && surf->blk_h == 5)
696 return ADDR_FMT_ASTC_10x5;
697 else if (surf->blk_w == 10 && surf->blk_h == 6)
698 return ADDR_FMT_ASTC_10x6;
699 else if (surf->blk_w == 10 && surf->blk_h == 8)
700 return ADDR_FMT_ASTC_10x8;
701 else if (surf->blk_w == 10 && surf->blk_h == 10)
702 return ADDR_FMT_ASTC_10x10;
703 else if (surf->blk_w == 12 && surf->blk_h == 10)
704 return ADDR_FMT_ASTC_12x10;
705 else if (surf->blk_w == 12 && surf->blk_h == 12)
706 return ADDR_FMT_ASTC_12x12;
707 } else {
708 switch (surf->bpe) {
709 case 1:
710 assert(!(surf->flags & RADEON_SURF_ZBUFFER));
711 return ADDR_FMT_8;
712 case 2:
713 assert(surf->flags & RADEON_SURF_ZBUFFER || !(surf->flags & RADEON_SURF_SBUFFER));
714 return ADDR_FMT_16;
715 case 4:
716 assert(surf->flags & RADEON_SURF_ZBUFFER || !(surf->flags & RADEON_SURF_SBUFFER));
717 return ADDR_FMT_32;
718 case 8:
719 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
720 return ADDR_FMT_32_32;
721 case 12:
722 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
723 return ADDR_FMT_32_32_32;
724 case 16:
725 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
726 return ADDR_FMT_32_32_32_32;
727 default:
728 unreachable("invalid bpe");
729 }
730 }
731 return ADDR_FMT_INVALID;
732 }
733
734 /* The addrlib pitch alignment is forced to this number for all chips to support interop
735 * between any 2 chips.
736 */
737 #define LINEAR_PITCH_ALIGNMENT 256
738
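/* Compute the layout of one mip level, including its DCC or HTILE metadata, using the
 * legacy (GFX6-GFX8) addrlib interface.
 */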
739 static int gfx6_compute_level(ADDR_HANDLE addrlib, const struct ac_surf_config *config,
740 struct radeon_surf *surf, bool is_stencil, unsigned level,
741 bool compressed, ADDR_COMPUTE_SURFACE_INFO_INPUT *AddrSurfInfoIn,
742 ADDR_COMPUTE_SURFACE_INFO_OUTPUT *AddrSurfInfoOut,
743 ADDR_COMPUTE_DCCINFO_INPUT *AddrDccIn,
744 ADDR_COMPUTE_DCCINFO_OUTPUT *AddrDccOut,
745 ADDR_COMPUTE_HTILE_INFO_INPUT *AddrHtileIn,
746 ADDR_COMPUTE_HTILE_INFO_OUTPUT *AddrHtileOut)
747 {
748 struct legacy_surf_level *surf_level;
749 struct legacy_surf_dcc_level *dcc_level;
750 ADDR_E_RETURNCODE ret;
751
752 AddrSurfInfoIn->mipLevel = level;
753 AddrSurfInfoIn->width = u_minify(config->info.width, level);
754 AddrSurfInfoIn->height = u_minify(config->info.height, level);
755
756 /* Make GFX6 linear surfaces compatible with all chips for multi-GPU interop. */
757 if (config->info.levels == 1 && AddrSurfInfoIn->tileMode == ADDR_TM_LINEAR_ALIGNED &&
758 AddrSurfInfoIn->bpp && util_is_power_of_two_or_zero(AddrSurfInfoIn->bpp)) {
759 unsigned alignment = LINEAR_PITCH_ALIGNMENT / surf->bpe;
760
761 AddrSurfInfoIn->width = align(AddrSurfInfoIn->width, alignment);
762 }
763
764 /* addrlib assumes the bytes/pixel is a divisor of 64, which is not
765 * true for r32g32b32 formats. */
766 if (AddrSurfInfoIn->bpp == 96) {
767 assert(config->info.levels == 1);
768 assert(AddrSurfInfoIn->tileMode == ADDR_TM_LINEAR_ALIGNED);
769
770 /* The least common multiple of 64 bytes and 12 bytes/pixel is
771 * 192 bytes, or 16 pixels. */
772 AddrSurfInfoIn->width = align(AddrSurfInfoIn->width, 16);
773 }
774
775 if (config->is_3d)
776 AddrSurfInfoIn->numSlices = u_minify(config->info.depth, level);
777 else if (config->is_cube)
778 AddrSurfInfoIn->numSlices = 6;
779 else
780 AddrSurfInfoIn->numSlices = config->info.array_size;
781
782 if (level > 0) {
783 /* Set the base level pitch. This is needed for calculation
784 * of non-zero levels. */
785 if (is_stencil)
786 AddrSurfInfoIn->basePitch = surf->u.legacy.zs.stencil_level[0].nblk_x;
787 else
788 AddrSurfInfoIn->basePitch = surf->u.legacy.level[0].nblk_x;
789
790 /* Convert blocks to pixels for compressed formats. */
791 if (compressed)
792 AddrSurfInfoIn->basePitch *= surf->blk_w;
793 }
794
795 ret = AddrComputeSurfaceInfo(addrlib, AddrSurfInfoIn, AddrSurfInfoOut);
796 if (ret != ADDR_OK) {
797 return ret;
798 }
799
800 surf_level = is_stencil ? &surf->u.legacy.zs.stencil_level[level] : &surf->u.legacy.level[level];
801 dcc_level = &surf->u.legacy.color.dcc_level[level];
802 surf_level->offset_256B = align64(surf->surf_size, AddrSurfInfoOut->baseAlign) / 256;
803 surf_level->slice_size_dw = AddrSurfInfoOut->sliceSize / 4;
804 surf_level->nblk_x = AddrSurfInfoOut->pitch;
805 surf_level->nblk_y = AddrSurfInfoOut->height;
806
807 switch (AddrSurfInfoOut->tileMode) {
808 case ADDR_TM_LINEAR_ALIGNED:
809 surf_level->mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
810 break;
811 case ADDR_TM_1D_TILED_THIN1:
812 case ADDR_TM_1D_TILED_THICK:
813 case ADDR_TM_PRT_TILED_THIN1:
814 surf_level->mode = RADEON_SURF_MODE_1D;
815 break;
816 case ADDR_TM_2D_TILED_THIN1:
817 case ADDR_TM_PRT_2D_TILED_THIN1:
818 case ADDR_TM_PRT_TILED_THICK:
819 surf_level->mode = RADEON_SURF_MODE_2D;
820 break;
821 default:
822 assert(0);
823 }
824
825 if (is_stencil)
826 surf->u.legacy.zs.stencil_tiling_index[level] = AddrSurfInfoOut->tileIndex;
827 else
828 surf->u.legacy.tiling_index[level] = AddrSurfInfoOut->tileIndex;
829
830 if (AddrSurfInfoIn->flags.prt) {
831 if (level == 0) {
832 surf->prt_tile_width = AddrSurfInfoOut->pitchAlign;
833 surf->prt_tile_height = AddrSurfInfoOut->heightAlign;
834 surf->prt_tile_depth = AddrSurfInfoOut->depthAlign;
835 }
836 if (surf_level->nblk_x >= surf->prt_tile_width &&
837 surf_level->nblk_y >= surf->prt_tile_height) {
838 /* +1 because the current level is not in the miptail */
839 surf->first_mip_tail_level = level + 1;
840 }
841 }
842
843 surf->surf_size = (uint64_t)surf_level->offset_256B * 256 + AddrSurfInfoOut->surfSize;
844
845 /* Clear DCC fields at the beginning. */
846 if (!AddrSurfInfoIn->flags.depth && !AddrSurfInfoIn->flags.stencil)
847 dcc_level->dcc_offset = 0;
848
849 /* The previous level's flag tells us if we can use DCC for this level. */
850 if (AddrSurfInfoIn->flags.dccCompatible && (level == 0 || AddrDccOut->subLvlCompressible)) {
851 bool prev_level_clearable = level == 0 || AddrDccOut->dccRamSizeAligned;
852
853 AddrDccIn->colorSurfSize = AddrSurfInfoOut->surfSize;
854 AddrDccIn->tileMode = AddrSurfInfoOut->tileMode;
855 AddrDccIn->tileInfo = *AddrSurfInfoOut->pTileInfo;
856 AddrDccIn->tileIndex = AddrSurfInfoOut->tileIndex;
857 AddrDccIn->macroModeIndex = AddrSurfInfoOut->macroModeIndex;
858
859 ret = AddrComputeDccInfo(addrlib, AddrDccIn, AddrDccOut);
860
861 if (ret == ADDR_OK) {
862 dcc_level->dcc_offset = surf->meta_size;
863 surf->num_meta_levels = level + 1;
864 surf->meta_size = dcc_level->dcc_offset + AddrDccOut->dccRamSize;
865 surf->meta_alignment_log2 = MAX2(surf->meta_alignment_log2, util_logbase2(AddrDccOut->dccRamBaseAlign));
866
867 /* If the DCC size of a subresource (1 mip level or 1 slice)
868 * is not aligned, the DCC memory layout is not contiguous for
869 * that subresource, which means we can't use fast clear.
870 *
871 * We only do fast clears for whole mipmap levels. If we did
872 * per-slice fast clears, the same restriction would apply.
873 * (i.e. only compute the slice size and see if it's aligned)
874 *
875 * The last level can be non-contiguous and still be clearable
876 * if it's interleaved with the next level that doesn't exist.
877 */
878 if (AddrDccOut->dccRamSizeAligned ||
879 (prev_level_clearable && level == config->info.levels - 1))
880 dcc_level->dcc_fast_clear_size = AddrDccOut->dccFastClearSize;
881 else
882 dcc_level->dcc_fast_clear_size = 0;
883
884 /* Compute the DCC slice size because addrlib doesn't
885 * provide this info. As DCC memory is linear (each
886 * slice is the same size) it's easy to compute.
887 */
888 surf->meta_slice_size = AddrDccOut->dccRamSize / config->info.array_size;
889
890 /* For arrays, we have to compute the DCC info again
891 * with one slice size to get a correct fast clear
892 * size.
893 */
894 if (config->info.array_size > 1) {
895 AddrDccIn->colorSurfSize = AddrSurfInfoOut->sliceSize;
896 AddrDccIn->tileMode = AddrSurfInfoOut->tileMode;
897 AddrDccIn->tileInfo = *AddrSurfInfoOut->pTileInfo;
898 AddrDccIn->tileIndex = AddrSurfInfoOut->tileIndex;
899 AddrDccIn->macroModeIndex = AddrSurfInfoOut->macroModeIndex;
900
901 ret = AddrComputeDccInfo(addrlib, AddrDccIn, AddrDccOut);
902 if (ret == ADDR_OK) {
903 /* If the DCC memory isn't properly
904 * aligned, the data are interleaved
905 * across slices.
906 */
907 if (AddrDccOut->dccRamSizeAligned)
908 dcc_level->dcc_slice_fast_clear_size = AddrDccOut->dccFastClearSize;
909 else
910 dcc_level->dcc_slice_fast_clear_size = 0;
911 }
912
913 if (surf->flags & RADEON_SURF_CONTIGUOUS_DCC_LAYERS &&
914 surf->meta_slice_size != dcc_level->dcc_slice_fast_clear_size) {
915 surf->meta_size = 0;
916 surf->num_meta_levels = 0;
917 AddrDccOut->subLvlCompressible = false;
918 }
919 } else {
920 dcc_level->dcc_slice_fast_clear_size = dcc_level->dcc_fast_clear_size;
921 }
922 }
923 }
924
925 /* HTILE. */
926 if (!is_stencil && AddrSurfInfoIn->flags.depth && surf_level->mode == RADEON_SURF_MODE_2D &&
927 level == 0 && !(surf->flags & RADEON_SURF_NO_HTILE)) {
928 AddrHtileIn->flags.tcCompatible = AddrSurfInfoOut->tcCompatible;
929 AddrHtileIn->pitch = AddrSurfInfoOut->pitch;
930 AddrHtileIn->height = AddrSurfInfoOut->height;
931 AddrHtileIn->numSlices = AddrSurfInfoOut->depth;
932 AddrHtileIn->blockWidth = ADDR_HTILE_BLOCKSIZE_8;
933 AddrHtileIn->blockHeight = ADDR_HTILE_BLOCKSIZE_8;
934 AddrHtileIn->pTileInfo = AddrSurfInfoOut->pTileInfo;
935 AddrHtileIn->tileIndex = AddrSurfInfoOut->tileIndex;
936 AddrHtileIn->macroModeIndex = AddrSurfInfoOut->macroModeIndex;
937
938 ret = AddrComputeHtileInfo(addrlib, AddrHtileIn, AddrHtileOut);
939
940 if (ret == ADDR_OK) {
941 surf->meta_size = AddrHtileOut->htileBytes;
942 surf->meta_slice_size = AddrHtileOut->sliceSize;
943 surf->meta_alignment_log2 = util_logbase2(AddrHtileOut->baseAlign);
944 surf->meta_pitch = AddrHtileOut->pitch;
945 surf->num_meta_levels = level + 1;
946 }
947 }
948
949 return 0;
950 }
951
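/* Read the micro tile mode back from the tile mode array entry selected by level 0. */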
952 static void gfx6_set_micro_tile_mode(struct radeon_surf *surf, const struct radeon_info *info)
953 {
954 uint32_t tile_mode = info->si_tile_mode_array[surf->u.legacy.tiling_index[0]];
955
956 if (info->gfx_level >= GFX7)
957 surf->micro_tile_mode = G_009910_MICRO_TILE_MODE_NEW(tile_mode);
958 else
959 surf->micro_tile_mode = G_009910_MICRO_TILE_MODE(tile_mode);
960 }
961
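/* Derive the GFX7+ macro tile index from the tile split and bytes per element. */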
962 static unsigned cik_get_macro_tile_index(struct radeon_surf *surf)
963 {
964 unsigned index, tileb;
965
966 tileb = 8 * 8 * surf->bpe;
967 tileb = MIN2(surf->u.legacy.tile_split, tileb);
968
969 for (index = 0; tileb > 64; index++)
970 tileb >>= 1;
971
972 assert(index < 16);
973 return index;
974 }
975
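/* Return whether addrlib should be asked for a displayable (scanout-capable) micro tile
 * mode. Not used with modifiers, where the kernel decides displayability.
 */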
976 static bool get_display_flag(const struct ac_surf_config *config, const struct radeon_surf *surf)
977 {
978 unsigned num_channels = config->info.num_channels;
979 unsigned bpe = surf->bpe;
980
981 /* With modifiers the kernel is in charge of whether it is displayable.
982 * We need to ensure at least 32 pixels pitch alignment, but this is
983 * always the case when the blocksize >= 4K.
984 */
985 if (surf->modifier != DRM_FORMAT_MOD_INVALID)
986 return false;
987
988 if (!config->is_1d && !config->is_3d && !config->is_cube &&
989 !(surf->flags & RADEON_SURF_Z_OR_SBUFFER) &&
990 surf->flags & RADEON_SURF_SCANOUT && config->info.samples <= 1 && surf->blk_w <= 2 &&
991 surf->blk_h == 1) {
992 /* subsampled */
993 if (surf->blk_w == 2 && surf->blk_h == 1)
994 return true;
995
996 if (/* RGBA8 or RGBA16F */
997 (bpe >= 4 && bpe <= 8 && num_channels == 4) ||
998 /* R5G6B5 or R5G5B5A1 */
999 (bpe == 2 && num_channels >= 3) ||
1000 /* C8 palette */
1001 (bpe == 1 && num_channels == 1))
1002 return true;
1003 }
1004 return false;
1005 }
1006
1007 /**
1008 * This must be called after the first level is computed.
1009 *
1010 * Copy surface-global settings like pipe/bank config from level 0 surface
1011 * computation, and compute tile swizzle.
1012 */
1013 static int gfx6_surface_settings(ADDR_HANDLE addrlib, const struct radeon_info *info,
1014 const struct ac_surf_config *config,
1015 ADDR_COMPUTE_SURFACE_INFO_OUTPUT *csio, struct radeon_surf *surf)
1016 {
1017 surf->surf_alignment_log2 = util_logbase2(csio->baseAlign);
1018 surf->u.legacy.pipe_config = csio->pTileInfo->pipeConfig - 1;
1019 gfx6_set_micro_tile_mode(surf, info);
1020
1021 /* For 2D modes only. */
1022 if (csio->tileMode >= ADDR_TM_2D_TILED_THIN1) {
1023 surf->u.legacy.bankw = csio->pTileInfo->bankWidth;
1024 surf->u.legacy.bankh = csio->pTileInfo->bankHeight;
1025 surf->u.legacy.mtilea = csio->pTileInfo->macroAspectRatio;
1026 surf->u.legacy.tile_split = csio->pTileInfo->tileSplitBytes;
1027 surf->u.legacy.num_banks = csio->pTileInfo->banks;
1028 surf->u.legacy.macro_tile_index = csio->macroModeIndex;
1029 } else {
1030 surf->u.legacy.macro_tile_index = 0;
1031 }
1032
1033 /* Compute tile swizzle. */
1034 /* TODO: fix tile swizzle with mipmapping for GFX6 */
1035 if ((info->gfx_level >= GFX7 || config->info.levels == 1) && config->info.surf_index &&
1036 surf->u.legacy.level[0].mode == RADEON_SURF_MODE_2D &&
1037 !(surf->flags & (RADEON_SURF_Z_OR_SBUFFER | RADEON_SURF_SHAREABLE)) &&
1038 !get_display_flag(config, surf)) {
1039 ADDR_COMPUTE_BASE_SWIZZLE_INPUT AddrBaseSwizzleIn = {0};
1040 ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT AddrBaseSwizzleOut = {0};
1041
1042 AddrBaseSwizzleIn.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_INPUT);
1043 AddrBaseSwizzleOut.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT);
1044
1045 AddrBaseSwizzleIn.surfIndex = p_atomic_inc_return(config->info.surf_index) - 1;
1046 AddrBaseSwizzleIn.tileIndex = csio->tileIndex;
1047 AddrBaseSwizzleIn.macroModeIndex = csio->macroModeIndex;
1048 AddrBaseSwizzleIn.pTileInfo = csio->pTileInfo;
1049 AddrBaseSwizzleIn.tileMode = csio->tileMode;
1050
1051 int r = AddrComputeBaseSwizzle(addrlib, &AddrBaseSwizzleIn, &AddrBaseSwizzleOut);
1052 if (r != ADDR_OK)
1053 return r;
1054
1055 assert(AddrBaseSwizzleOut.tileSwizzle <=
1056 u_bit_consecutive(0, sizeof(surf->tile_swizzle) * 8));
1057 surf->tile_swizzle = AddrBaseSwizzleOut.tileSwizzle;
1058 }
1059 return 0;
1060 }
1061
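/* Compute the CMASK (fast color clear metadata) layout for GFX6-GFX8 color surfaces. */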
1062 static void ac_compute_cmask(const struct radeon_info *info, const struct ac_surf_config *config,
1063 struct radeon_surf *surf)
1064 {
1065 unsigned pipe_interleave_bytes = info->pipe_interleave_bytes;
1066 unsigned num_pipes = info->num_tile_pipes;
1067 unsigned cl_width, cl_height;
1068
1069 if (surf->flags & RADEON_SURF_Z_OR_SBUFFER || surf->is_linear ||
1070 (config->info.samples >= 2 && !surf->fmask_size))
1071 return;
1072
1073 assert(info->gfx_level <= GFX8);
1074
1075 switch (num_pipes) {
1076 case 2:
1077 cl_width = 32;
1078 cl_height = 16;
1079 break;
1080 case 4:
1081 cl_width = 32;
1082 cl_height = 32;
1083 break;
1084 case 8:
1085 cl_width = 64;
1086 cl_height = 32;
1087 break;
1088 case 16: /* Hawaii */
1089 cl_width = 64;
1090 cl_height = 64;
1091 break;
1092 default:
1093 assert(0);
1094 return;
1095 }
1096
1097 unsigned base_align = num_pipes * pipe_interleave_bytes;
1098
1099 unsigned width = align(surf->u.legacy.level[0].nblk_x, cl_width * 8);
1100 unsigned height = align(surf->u.legacy.level[0].nblk_y, cl_height * 8);
1101 unsigned slice_elements = (width * height) / (8 * 8);
1102
1103 /* Each element of CMASK is a nibble. */
1104 unsigned slice_bytes = slice_elements / 2;
1105
1106 surf->u.legacy.color.cmask_slice_tile_max = (width * height) / (128 * 128);
1107 if (surf->u.legacy.color.cmask_slice_tile_max)
1108 surf->u.legacy.color.cmask_slice_tile_max -= 1;
1109
1110 unsigned num_layers;
1111 if (config->is_3d)
1112 num_layers = config->info.depth;
1113 else if (config->is_cube)
1114 num_layers = 6;
1115 else
1116 num_layers = config->info.array_size;
1117
1118 surf->cmask_alignment_log2 = util_logbase2(MAX2(256, base_align));
1119 surf->cmask_slice_size = align(slice_bytes, base_align);
1120 surf->cmask_size = surf->cmask_slice_size * num_layers;
1121 }
1122
1123 /**
1124 * Fill in the tiling information in \p surf based on the given surface config.
1125 *
1126 * The following fields of \p surf must be initialized by the caller:
1127 * blk_w, blk_h, bpe, flags.
1128 */
1129 static int gfx6_compute_surface(ADDR_HANDLE addrlib, const struct radeon_info *info,
1130 const struct ac_surf_config *config, enum radeon_surf_mode mode,
1131 struct radeon_surf *surf)
1132 {
1133 unsigned level;
1134 bool compressed;
1135 ADDR_COMPUTE_SURFACE_INFO_INPUT AddrSurfInfoIn = {0};
1136 ADDR_COMPUTE_SURFACE_INFO_OUTPUT AddrSurfInfoOut = {0};
1137 ADDR_COMPUTE_DCCINFO_INPUT AddrDccIn = {0};
1138 ADDR_COMPUTE_DCCINFO_OUTPUT AddrDccOut = {0};
1139 ADDR_COMPUTE_HTILE_INFO_INPUT AddrHtileIn = {0};
1140 ADDR_COMPUTE_HTILE_INFO_OUTPUT AddrHtileOut = {0};
1141 ADDR_TILEINFO AddrTileInfoIn = {0};
1142 ADDR_TILEINFO AddrTileInfoOut = {0};
1143 int r;
1144
1145 AddrSurfInfoIn.size = sizeof(ADDR_COMPUTE_SURFACE_INFO_INPUT);
1146 AddrSurfInfoOut.size = sizeof(ADDR_COMPUTE_SURFACE_INFO_OUTPUT);
1147 AddrDccIn.size = sizeof(ADDR_COMPUTE_DCCINFO_INPUT);
1148 AddrDccOut.size = sizeof(ADDR_COMPUTE_DCCINFO_OUTPUT);
1149 AddrHtileIn.size = sizeof(ADDR_COMPUTE_HTILE_INFO_INPUT);
1150 AddrHtileOut.size = sizeof(ADDR_COMPUTE_HTILE_INFO_OUTPUT);
1151 AddrSurfInfoOut.pTileInfo = &AddrTileInfoOut;
1152
1153 compressed = surf->blk_w == 4 && surf->blk_h == 4;
1154
1155 /* MSAA requires 2D tiling. */
1156 if (config->info.samples > 1)
1157 mode = RADEON_SURF_MODE_2D;
1158
1159 /* DB doesn't support linear layouts. */
1160 if (surf->flags & (RADEON_SURF_Z_OR_SBUFFER) && mode < RADEON_SURF_MODE_1D)
1161 mode = RADEON_SURF_MODE_1D;
1162
1163 /* Set the requested tiling mode. */
1164 switch (mode) {
1165 case RADEON_SURF_MODE_LINEAR_ALIGNED:
1166 AddrSurfInfoIn.tileMode = ADDR_TM_LINEAR_ALIGNED;
1167 break;
1168 case RADEON_SURF_MODE_1D:
1169 if (surf->flags & RADEON_SURF_PRT)
1170 AddrSurfInfoIn.tileMode = ADDR_TM_PRT_TILED_THIN1;
1171 else
1172 AddrSurfInfoIn.tileMode = ADDR_TM_1D_TILED_THIN1;
1173 break;
1174 case RADEON_SURF_MODE_2D:
1175 if (surf->flags & RADEON_SURF_PRT) {
1176 if (config->is_3d && surf->bpe < 8) {
1177 AddrSurfInfoIn.tileMode = ADDR_TM_PRT_2D_TILED_THICK;
1178 } else {
1179 AddrSurfInfoIn.tileMode = ADDR_TM_PRT_2D_TILED_THIN1;
1180 }
1181 } else
1182 AddrSurfInfoIn.tileMode = ADDR_TM_2D_TILED_THIN1;
1183 break;
1184 default:
1185 assert(0);
1186 }
1187
1188 AddrSurfInfoIn.format = bpe_to_format(surf);
1189 if (!compressed)
1190 AddrDccIn.bpp = AddrSurfInfoIn.bpp = surf->bpe * 8;
1191
1192 /* Setting ADDR_FMT_32_32_32 breaks gfx6-8, while INVALID works. */
1193 if (AddrSurfInfoIn.format == ADDR_FMT_32_32_32)
1194 AddrSurfInfoIn.format = ADDR_FMT_INVALID;
1195
1196 AddrDccIn.numSamples = AddrSurfInfoIn.numSamples = MAX2(1, config->info.samples);
1197 AddrSurfInfoIn.tileIndex = -1;
1198
1199 if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER)) {
1200 AddrDccIn.numSamples = AddrSurfInfoIn.numFrags = MAX2(1, config->info.storage_samples);
1201 }
1202
1203 /* Set the micro tile type. */
1204 if (surf->flags & RADEON_SURF_SCANOUT)
1205 AddrSurfInfoIn.tileType = ADDR_DISPLAYABLE;
1206 else if (surf->flags & RADEON_SURF_Z_OR_SBUFFER)
1207 AddrSurfInfoIn.tileType = ADDR_DEPTH_SAMPLE_ORDER;
1208 else
1209 AddrSurfInfoIn.tileType = ADDR_NON_DISPLAYABLE;
1210
1211 AddrSurfInfoIn.flags.color = !(surf->flags & RADEON_SURF_Z_OR_SBUFFER);
1212 AddrSurfInfoIn.flags.depth = (surf->flags & RADEON_SURF_ZBUFFER) != 0;
1213 AddrSurfInfoIn.flags.cube = config->is_cube;
1214 AddrSurfInfoIn.flags.display = get_display_flag(config, surf);
1215 AddrSurfInfoIn.flags.pow2Pad = config->info.levels > 1;
1216 AddrSurfInfoIn.flags.tcCompatible = (surf->flags & RADEON_SURF_TC_COMPATIBLE_HTILE) != 0;
1217 AddrSurfInfoIn.flags.prt = (surf->flags & RADEON_SURF_PRT) != 0;
1218
1219 /* Only degrade the tile mode for space if TC-compatible HTILE hasn't been
1220 * requested, because TC-compatible HTILE requires 2D tiling.
1221 */
1222 AddrSurfInfoIn.flags.opt4Space = !AddrSurfInfoIn.flags.tcCompatible &&
1223 !AddrSurfInfoIn.flags.fmask && config->info.samples <= 1 &&
1224 !(surf->flags & RADEON_SURF_FORCE_SWIZZLE_MODE);
1225
1226 /* DCC notes:
1227 * - If we add MSAA support, keep in mind that CB can't decompress 8bpp
1228 * with samples >= 4.
1229 * - Mipmapped array textures have low performance (discovered by a closed
1230 * driver team).
1231 */
1232 AddrSurfInfoIn.flags.dccCompatible =
1233 info->gfx_level >= GFX8 && info->has_graphics && /* disable DCC on compute-only chips */
1234 !(surf->flags & RADEON_SURF_Z_OR_SBUFFER) && !(surf->flags & RADEON_SURF_DISABLE_DCC) &&
1235 !compressed &&
1236 ((config->info.array_size == 1 && config->info.depth == 1) || config->info.levels == 1);
1237
1238 AddrSurfInfoIn.flags.noStencil =
1239 !(surf->flags & RADEON_SURF_SBUFFER) || (surf->flags & RADEON_SURF_NO_RENDER_TARGET);
1240
1241 AddrSurfInfoIn.flags.compressZ = !!(surf->flags & RADEON_SURF_Z_OR_SBUFFER);
1242
1243 /* On GFX7-GFX8, the DB uses the same pitch and tile mode (except tilesplit)
1244 * for Z and stencil. This can cause a number of problems which we work
1245 * around here:
1246 *
1247 * - a depth part that is incompatible with mipmapped texturing
1248 * - at least on Stoney, entirely incompatible Z/S aspects (e.g.
1249 * incorrect tiling applied to the stencil part, stencil buffer
1250 * memory accesses that go out of bounds) even without mipmapping
1251 *
1252 * Some piglit tests that are prone to different types of related
1253 * failures:
1254 * ./bin/ext_framebuffer_multisample-upsample 2 stencil
1255 * ./bin/framebuffer-blit-levels {draw,read} stencil
1256 * ./bin/ext_framebuffer_multisample-unaligned-blit N {depth,stencil} {msaa,upsample,downsample}
1257 * ./bin/fbo-depth-array fs-writes-{depth,stencil} / {depth,stencil}-{clear,layered-clear,draw}
1258 * ./bin/depthstencil-render-miplevels 1024 d=s=z24_s8
1259 */
1260 int stencil_tile_idx = -1;
1261
1262 if (AddrSurfInfoIn.flags.depth && !AddrSurfInfoIn.flags.noStencil &&
1263 (config->info.levels > 1 || info->family == CHIP_STONEY)) {
1264 /* Compute stencilTileIdx that is compatible with the (depth)
1265 * tileIdx. This degrades the depth surface if necessary to
1266 * ensure that a matching stencilTileIdx exists. */
1267 AddrSurfInfoIn.flags.matchStencilTileCfg = 1;
1268
1269 /* Keep the depth mip-tail compatible with texturing. */
1270 if (config->info.levels > 1 && !(surf->flags & RADEON_SURF_NO_STENCIL_ADJUST))
1271 AddrSurfInfoIn.flags.noStencil = 1;
1272 }
1273
1274 /* Set preferred macrotile parameters. This is usually required
1275 * for shared resources. This is for 2D tiling only. */
1276 if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER) &&
1277 AddrSurfInfoIn.tileMode >= ADDR_TM_2D_TILED_THIN1 && surf->u.legacy.bankw &&
1278 surf->u.legacy.bankh && surf->u.legacy.mtilea && surf->u.legacy.tile_split) {
1279 /* If any of these parameters are incorrect, the calculation
1280 * will fail. */
1281 AddrTileInfoIn.banks = surf->u.legacy.num_banks;
1282 AddrTileInfoIn.bankWidth = surf->u.legacy.bankw;
1283 AddrTileInfoIn.bankHeight = surf->u.legacy.bankh;
1284 AddrTileInfoIn.macroAspectRatio = surf->u.legacy.mtilea;
1285 AddrTileInfoIn.tileSplitBytes = surf->u.legacy.tile_split;
1286 AddrTileInfoIn.pipeConfig = surf->u.legacy.pipe_config + 1; /* +1 compared to GB_TILE_MODE */
1287 AddrSurfInfoIn.flags.opt4Space = 0;
1288 AddrSurfInfoIn.pTileInfo = &AddrTileInfoIn;
1289
1290 /* If AddrSurfInfoIn.pTileInfo is set, Addrlib doesn't set
1291 * the tile index, because we are expected to know it if
1292 * we know the other parameters.
1293 *
1294 * This is something that can easily be fixed in Addrlib.
1295 * For now, just figure it out here.
1296 * Note that only 2D_TILE_THIN1 is handled here.
1297 */
1298 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
1299 assert(AddrSurfInfoIn.tileMode == ADDR_TM_2D_TILED_THIN1);
1300
1301 if (info->gfx_level == GFX6) {
1302 if (AddrSurfInfoIn.tileType == ADDR_DISPLAYABLE) {
1303 if (surf->bpe == 2)
1304 AddrSurfInfoIn.tileIndex = 11; /* 16bpp */
1305 else
1306 AddrSurfInfoIn.tileIndex = 12; /* 32bpp */
1307 } else {
1308 if (surf->bpe == 1)
1309 AddrSurfInfoIn.tileIndex = 14; /* 8bpp */
1310 else if (surf->bpe == 2)
1311 AddrSurfInfoIn.tileIndex = 15; /* 16bpp */
1312 else if (surf->bpe == 4)
1313 AddrSurfInfoIn.tileIndex = 16; /* 32bpp */
1314 else
1315 AddrSurfInfoIn.tileIndex = 17; /* 64bpp (and 128bpp) */
1316 }
1317 } else {
1318 /* GFX7 - GFX8 */
1319 if (AddrSurfInfoIn.tileType == ADDR_DISPLAYABLE)
1320 AddrSurfInfoIn.tileIndex = 10; /* 2D displayable */
1321 else
1322 AddrSurfInfoIn.tileIndex = 14; /* 2D non-displayable */
1323
1324 /* Addrlib doesn't set this if tileIndex is forced like above. */
1325 AddrSurfInfoOut.macroModeIndex = cik_get_macro_tile_index(surf);
1326 }
1327 }
1328
1329 surf->has_stencil = !!(surf->flags & RADEON_SURF_SBUFFER);
1330 surf->num_meta_levels = 0;
1331 surf->surf_size = 0;
1332 surf->meta_size = 0;
1333 surf->meta_slice_size = 0;
1334 surf->meta_alignment_log2 = 0;
1335
1336 const bool only_stencil =
1337 (surf->flags & RADEON_SURF_SBUFFER) && !(surf->flags & RADEON_SURF_ZBUFFER);
1338
1339 /* Calculate texture layout information. */
1340 if (!only_stencil) {
1341 for (level = 0; level < config->info.levels; level++) {
1342 r = gfx6_compute_level(addrlib, config, surf, false, level, compressed, &AddrSurfInfoIn,
1343 &AddrSurfInfoOut, &AddrDccIn, &AddrDccOut, &AddrHtileIn,
1344 &AddrHtileOut);
1345 if (r)
1346 return r;
1347
1348 if (level > 0)
1349 continue;
1350
1351 if (!AddrSurfInfoOut.tcCompatible) {
1352 AddrSurfInfoIn.flags.tcCompatible = 0;
1353 surf->flags &= ~RADEON_SURF_TC_COMPATIBLE_HTILE;
1354 }
1355
1356 if (AddrSurfInfoIn.flags.matchStencilTileCfg) {
1357 AddrSurfInfoIn.flags.matchStencilTileCfg = 0;
1358 AddrSurfInfoIn.tileIndex = AddrSurfInfoOut.tileIndex;
1359 stencil_tile_idx = AddrSurfInfoOut.stencilTileIdx;
1360
1361 assert(stencil_tile_idx >= 0);
1362 }
1363
1364 r = gfx6_surface_settings(addrlib, info, config, &AddrSurfInfoOut, surf);
1365 if (r)
1366 return r;
1367 }
1368 }
1369
1370 /* Calculate texture layout information for stencil. */
1371 if (surf->flags & RADEON_SURF_SBUFFER) {
1372 AddrSurfInfoIn.tileIndex = stencil_tile_idx;
1373 AddrSurfInfoIn.bpp = 8;
1374 AddrSurfInfoIn.format = ADDR_FMT_8;
1375 AddrSurfInfoIn.flags.depth = 0;
1376 AddrSurfInfoIn.flags.stencil = 1;
1377 AddrSurfInfoIn.flags.tcCompatible = 0;
1378 /* This will be ignored if AddrSurfInfoIn.pTileInfo is NULL. */
1379 AddrTileInfoIn.tileSplitBytes = surf->u.legacy.stencil_tile_split;
1380
1381 for (level = 0; level < config->info.levels; level++) {
1382 r = gfx6_compute_level(addrlib, config, surf, true, level, compressed, &AddrSurfInfoIn,
1383 &AddrSurfInfoOut, &AddrDccIn, &AddrDccOut, NULL, NULL);
1384 if (r)
1385 return r;
1386
1387 /* DB uses the depth pitch for both stencil and depth. */
1388 if (!only_stencil) {
1389 if (surf->u.legacy.zs.stencil_level[level].nblk_x != surf->u.legacy.level[level].nblk_x)
1390 surf->u.legacy.stencil_adjusted = true;
1391 } else {
1392 surf->u.legacy.level[level].nblk_x = surf->u.legacy.zs.stencil_level[level].nblk_x;
1393 }
1394
1395 if (level == 0) {
1396 if (only_stencil) {
1397 r = gfx6_surface_settings(addrlib, info, config, &AddrSurfInfoOut, surf);
1398 if (r)
1399 return r;
1400 }
1401
1402 /* For 2D modes only. */
1403 if (AddrSurfInfoOut.tileMode >= ADDR_TM_2D_TILED_THIN1) {
1404 surf->u.legacy.stencil_tile_split = AddrSurfInfoOut.pTileInfo->tileSplitBytes;
1405 }
1406 }
1407 }
1408 }
1409
1410 /* Compute FMASK. */
1411 if (config->info.samples >= 2 && AddrSurfInfoIn.flags.color && info->has_graphics &&
1412 !(surf->flags & RADEON_SURF_NO_FMASK)) {
1413 ADDR_COMPUTE_FMASK_INFO_INPUT fin = {0};
1414 ADDR_COMPUTE_FMASK_INFO_OUTPUT fout = {0};
1415 ADDR_TILEINFO fmask_tile_info = {0};
1416
1417 fin.size = sizeof(fin);
1418 fout.size = sizeof(fout);
1419
1420 fin.tileMode = AddrSurfInfoOut.tileMode;
1421 fin.pitch = AddrSurfInfoOut.pitch;
1422 fin.height = config->info.height;
1423 fin.numSlices = AddrSurfInfoIn.numSlices;
1424 fin.numSamples = AddrSurfInfoIn.numSamples;
1425 fin.numFrags = AddrSurfInfoIn.numFrags;
1426 fin.tileIndex = -1;
1427 fout.pTileInfo = &fmask_tile_info;
1428
1429 r = AddrComputeFmaskInfo(addrlib, &fin, &fout);
1430 if (r)
1431 return r;
1432
1433 surf->fmask_size = fout.fmaskBytes;
1434 surf->fmask_alignment_log2 = util_logbase2(fout.baseAlign);
1435 surf->fmask_slice_size = fout.sliceSize;
1436 surf->fmask_tile_swizzle = 0;
1437
1438 surf->u.legacy.color.fmask.slice_tile_max = (fout.pitch * fout.height) / 64;
1439 if (surf->u.legacy.color.fmask.slice_tile_max)
1440 surf->u.legacy.color.fmask.slice_tile_max -= 1;
1441
1442 surf->u.legacy.color.fmask.tiling_index = fout.tileIndex;
1443 surf->u.legacy.color.fmask.bankh = fout.pTileInfo->bankHeight;
1444 surf->u.legacy.color.fmask.pitch_in_pixels = fout.pitch;
1445
1446 /* Compute tile swizzle for FMASK. */
1447 if (config->info.fmask_surf_index && !(surf->flags & RADEON_SURF_SHAREABLE)) {
1448 ADDR_COMPUTE_BASE_SWIZZLE_INPUT xin = {0};
1449 ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT xout = {0};
1450
1451 xin.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_INPUT);
1452 xout.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT);
1453
1454 /* This counter starts from 1 instead of 0. */
1455 xin.surfIndex = p_atomic_inc_return(config->info.fmask_surf_index);
1456 xin.tileIndex = fout.tileIndex;
1457 xin.macroModeIndex = fout.macroModeIndex;
1458 xin.pTileInfo = fout.pTileInfo;
1459 xin.tileMode = fin.tileMode;
1460
1461 int r = AddrComputeBaseSwizzle(addrlib, &xin, &xout);
1462 if (r != ADDR_OK)
1463 return r;
1464
1465 assert(xout.tileSwizzle <= u_bit_consecutive(0, sizeof(surf->tile_swizzle) * 8));
1466 surf->fmask_tile_swizzle = xout.tileSwizzle;
1467 }
1468 }
1469
1470 /* Recalculate the whole DCC miptree size including disabled levels.
1471 * This is what addrlib does, but calling addrlib would be a lot more
1472 * complicated.
1473 */
1474 if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER) && surf->meta_size && config->info.levels > 1) {
1475 /* The smallest miplevels that are never compressed by DCC
1476 * still read the DCC buffer via TC if the base level uses DCC,
1477 * and for some reason the DCC buffer needs to be larger if
1478 * the miptree uses non-zero tile_swizzle. Otherwise there are
1479 * VM faults.
1480 *
1481 * "dcc_alignment * 4" was determined by trial and error.
1482 */
1483 surf->meta_size = align64(surf->surf_size >> 8, (1ull << surf->meta_alignment_log2) * 4);
1484 }
1485
1486 /* Make sure HTILE covers the whole miptree, because the shader reads
1487 * TC-compatible HTILE even for levels where it's disabled by DB.
1488 */
1489 if (surf->flags & (RADEON_SURF_Z_OR_SBUFFER | RADEON_SURF_TC_COMPATIBLE_HTILE) &&
1490 surf->meta_size && config->info.levels > 1) {
1491 /* MSAA can't occur with levels > 1, so ignore the sample count. */
1492 const unsigned total_pixels = surf->surf_size / surf->bpe;
1493 const unsigned htile_block_size = 8 * 8;
1494 const unsigned htile_element_size = 4;
1495
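/* One 32-bit HTILE value covers one 8x8-pixel tile of the depth surface. */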
1496 surf->meta_size = (total_pixels / htile_block_size) * htile_element_size;
1497 surf->meta_size = align(surf->meta_size, 1 << surf->meta_alignment_log2);
1498 } else if (surf->flags & RADEON_SURF_Z_OR_SBUFFER && !surf->meta_size) {
1499 /* Unset this if HTILE is not present. */
1500 surf->flags &= ~RADEON_SURF_TC_COMPATIBLE_HTILE;
1501 }
1502
1503 surf->is_linear = (only_stencil ? surf->u.legacy.zs.stencil_level[0].mode :
1504 surf->u.legacy.level[0].mode) == RADEON_SURF_MODE_LINEAR_ALIGNED;
1505
1506 surf->is_displayable = surf->is_linear || surf->micro_tile_mode == RADEON_MICRO_MODE_DISPLAY ||
1507 surf->micro_tile_mode == RADEON_MICRO_MODE_RENDER;
1508
1509 /* The rotated micro tile mode doesn't work if both CMASK and RB+ are
1510 * used at the same time. This case is not currently expected to occur
1511 * because we don't use rotated. Enforce this restriction on all chips
1512 * to facilitate testing.
1513 */
1514 if (surf->micro_tile_mode == RADEON_MICRO_MODE_RENDER) {
1515 assert(!"rotate micro tile mode is unsupported");
1516 return ADDR_ERROR;
1517 }
1518
1519 ac_compute_cmask(info, config, surf);
1520 return 0;
1521 }
1522
1523 /* This is only called when expecting a tiled layout. */
1524 static int gfx9_get_preferred_swizzle_mode(ADDR_HANDLE addrlib, const struct radeon_info *info,
1525 struct radeon_surf *surf,
1526 ADDR2_COMPUTE_SURFACE_INFO_INPUT *in, bool is_fmask,
1527 AddrSwizzleMode *swizzle_mode)
1528 {
1529 ADDR_E_RETURNCODE ret;
1530 ADDR2_GET_PREFERRED_SURF_SETTING_INPUT sin = {0};
1531 ADDR2_GET_PREFERRED_SURF_SETTING_OUTPUT sout = {0};
1532
1533 sin.size = sizeof(ADDR2_GET_PREFERRED_SURF_SETTING_INPUT);
1534 sout.size = sizeof(ADDR2_GET_PREFERRED_SURF_SETTING_OUTPUT);
1535
1536 sin.flags = in->flags;
1537 sin.resourceType = in->resourceType;
1538 sin.format = in->format;
1539 sin.resourceLoction = ADDR_RSRC_LOC_INVIS;
1540
1541 /* TODO: We could allow some of these: */
1542 sin.forbiddenBlock.micro = 1; /* don't allow the 256B swizzle modes */
1543
1544 if (info->gfx_level >= GFX11) {
1545 /* Disable 256K on APUs because it doesn't work with DAL. */
1546 if (!info->has_dedicated_vram) {
1547 sin.forbiddenBlock.gfx11.thin256KB = 1;
1548 sin.forbiddenBlock.gfx11.thick256KB = 1;
1549 }
1550 } else {
1551 sin.forbiddenBlock.var = 1; /* don't allow the variable-sized swizzle modes */
1552 }
1553
1554 sin.bpp = in->bpp;
1555 sin.width = in->width;
1556 sin.height = in->height;
1557 sin.numSlices = in->numSlices;
1558 sin.numMipLevels = in->numMipLevels;
1559 sin.numSamples = in->numSamples;
1560 sin.numFrags = in->numFrags;
1561
1562 if (is_fmask) {
1563 sin.flags.display = 0;
1564 sin.flags.color = 0;
1565 sin.flags.fmask = 1;
1566 }
1567
1568 /* For PRT images, force a 64 KiB block size so that the created image is
1569 * consistent with the format properties reported by Vulkan, independent of
1570 * the specific image. */
1571 if (sin.flags.prt) {
1572 sin.forbiddenBlock.macroThin4KB = 1;
1573 sin.forbiddenBlock.macroThick4KB = 1;
1574 if (info->gfx_level >= GFX11) {
1575 sin.forbiddenBlock.gfx11.thin256KB = 1;
1576 sin.forbiddenBlock.gfx11.thick256KB = 1;
1577 }
1578 sin.forbiddenBlock.linear = 1;
1579 }
1580
1581 if (surf->flags & RADEON_SURF_FORCE_MICRO_TILE_MODE) {
1582 sin.forbiddenBlock.linear = 1;
1583
1584 if (surf->micro_tile_mode == RADEON_MICRO_MODE_DISPLAY)
1585 sin.preferredSwSet.sw_D = 1;
1586 else if (surf->micro_tile_mode == RADEON_MICRO_MODE_STANDARD)
1587 sin.preferredSwSet.sw_S = 1;
1588 else if (surf->micro_tile_mode == RADEON_MICRO_MODE_DEPTH)
1589 sin.preferredSwSet.sw_Z = 1;
1590 else if (surf->micro_tile_mode == RADEON_MICRO_MODE_RENDER)
1591 sin.preferredSwSet.sw_R = 1;
1592 }
1593
1594 if (info->gfx_level >= GFX10 && in->resourceType == ADDR_RSRC_TEX_3D && in->numSlices > 1) {
1595 /* 3D textures should use S swizzle modes for the best performance.
1596 * The only exception is 3D render targets, which prefer 64KB_D_X.
1597 *
1598 * 3D texture sampler performance with a very large 3D texture:
1599 * ADDR_SW_64KB_R_X = 19 FPS (DCC on), 26 FPS (DCC off)
1600 * ADDR_SW_64KB_Z_X = 25 FPS
1601 * ADDR_SW_64KB_D_X = 53 FPS
1602 * ADDR_SW_4KB_S = 53 FPS
1603 * ADDR_SW_64KB_S = 53 FPS
1604 * ADDR_SW_64KB_S_T = 61 FPS
1605 * ADDR_SW_4KB_S_X = 63 FPS
1606 * ADDR_SW_64KB_S_X = 62 FPS
1607 */
1608 sin.preferredSwSet.sw_S = 1;
1609 }
1610
1611 ret = Addr2GetPreferredSurfaceSetting(addrlib, &sin, &sout);
1612 if (ret != ADDR_OK)
1613 return ret;
1614
1615 *swizzle_mode = sout.swizzleMode;
1616 return 0;
1617 }
1618
1619 static bool is_dcc_supported_by_CB(const struct radeon_info *info, unsigned sw_mode)
1620 {
1621 switch (info->gfx_level) {
1622 case GFX9:
1623 return sw_mode != ADDR_SW_LINEAR;
1624
1625 case GFX10:
1626 case GFX10_3:
1627 return sw_mode == ADDR_SW_64KB_Z_X || sw_mode == ADDR_SW_64KB_R_X;
1628
1629 case GFX11:
1630 case GFX11_5:
1631 return sw_mode == ADDR_SW_64KB_Z_X || sw_mode == ADDR_SW_64KB_R_X ||
1632 sw_mode == ADDR_SW_256KB_Z_X || sw_mode == ADDR_SW_256KB_R_X;
1633
1634 default:
1635 unreachable("invalid gfx_level");
1636 }
1637 }
1638
1639 ASSERTED static bool is_dcc_supported_by_L2(const struct radeon_info *info,
1640 const struct radeon_surf *surf)
1641 {
1642 bool single_indep = surf->u.gfx9.color.dcc.independent_64B_blocks !=
1643 surf->u.gfx9.color.dcc.independent_128B_blocks;
1644 bool valid_64b = surf->u.gfx9.color.dcc.independent_64B_blocks &&
1645 surf->u.gfx9.color.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_64B;
1646 bool valid_128b = surf->u.gfx9.color.dcc.independent_128B_blocks &&
1647 (surf->u.gfx9.color.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_128B ||
1648 (info->gfx_level >= GFX11_5 &&
1649 surf->u.gfx9.color.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_256B));
1650
1651 if (info->gfx_level <= GFX9) {
1652 /* Only independent 64B blocks are supported. */
1653 return single_indep && valid_64b;
1654 }
1655
1656 if (info->family == CHIP_NAVI10) {
1657 /* Only independent 128B blocks are supported. */
1658 return single_indep && valid_128b;
1659 }
1660
1661 if (info->family == CHIP_NAVI12 || info->family == CHIP_NAVI14) {
1662 /* Either 64B or 128B can be used, but the INDEPENDENT_*_BLOCKS setting must match.
1663 * If 64B is used, DCC image stores are unsupported.
1664 */
1665 return single_indep && (valid_64b || valid_128b);
1666 }
1667
1668 /* Valid settings are those of Navi14, plus the case where both independent 64B and 128B blocks are enabled with max_compressed_block_size == 64B. */
1669 return (single_indep && (valid_64b || valid_128b)) || valid_64b;
1670 }
1671
1672 static bool gfx10_DCN_requires_independent_64B_blocks(const struct radeon_info *info,
1673 const struct ac_surf_config *config)
1674 {
1675 assert(info->gfx_level >= GFX10);
1676
1677 /* Older kernels have buggy DAL. */
1678 if (info->drm_minor <= 43)
1679 return true;
1680
1681 /* For 4K, DCN requires INDEPENDENT_64B_BLOCKS = 1 and MAX_COMPRESSED_BLOCK_SIZE = 64B. */
1682 return config->info.width > 2560 || config->info.height > 2560;
1683 }
1684
1685 void ac_modifier_max_extent(const struct radeon_info *info,
1686 uint64_t modifier, uint32_t *width, uint32_t *height)
1687 {
1688 /* DCC is supported with any size. The maximum width per display pipe is 5760, but multiple
1689 * display pipes can be used to drive the display.
1690 */
1691 *width = 16384;
1692 *height = 16384;
1693
1694 if (ac_modifier_has_dcc(modifier)) {
1695 bool independent_64B_blocks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
1696
1697 if (info->gfx_level >= GFX10 && !independent_64B_blocks) {
1698 /* For 4K, DCN requires INDEPENDENT_64B_BLOCKS = 1 and MAX_COMPRESSED_BLOCK_SIZE = 64B. */
1699 *width = 2560;
1700 *height = 2560;
1701 }
1702 }
1703 }
1704
1705 static bool gfx9_is_dcc_supported_by_DCN(const struct radeon_info *info,
1706 const struct ac_surf_config *config,
1707 const struct radeon_surf *surf, bool rb_aligned,
1708 bool pipe_aligned)
1709 {
1710 if (!info->use_display_dcc_unaligned && !info->use_display_dcc_with_retile_blit)
1711 return false;
1712
1713 /* 16bpp and 64bpp are more complicated, so they are disallowed for now. */
1714 if (surf->bpe != 4)
1715 return false;
1716
1717 /* Handle unaligned DCC. */
1718 if (info->use_display_dcc_unaligned && (rb_aligned || pipe_aligned))
1719 return false;
1720
1721 switch (info->gfx_level) {
1722 case GFX9:
1723 /* There are more constraints, but we always set
1724 * INDEPENDENT_64B_BLOCKS = 1 and MAX_COMPRESSED_BLOCK_SIZE = 64B,
1725 * which always works.
1726 */
1727 assert(surf->u.gfx9.color.dcc.independent_64B_blocks &&
1728 surf->u.gfx9.color.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_64B);
1729 return true;
1730 case GFX10:
1731 case GFX10_3:
1732 case GFX11:
1733 /* DCN requires INDEPENDENT_128B_BLOCKS = 0 only on Navi1x. */
1734 if (info->gfx_level == GFX10 && surf->u.gfx9.color.dcc.independent_128B_blocks)
1735 return false;
1736
1737 return (!gfx10_DCN_requires_independent_64B_blocks(info, config) ||
1738 (surf->u.gfx9.color.dcc.independent_64B_blocks &&
1739 surf->u.gfx9.color.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_64B));
1740 case GFX11_5:
1741 // TODO: clarify DCN support for 256B compressed block sizes and other modes with the DAL team
1742 return true;
1743 default:
1744 unreachable("unhandled chip");
1745 return false;
1746 }
1747 }
1748
1749 static void ac_copy_dcc_equation(const struct radeon_info *info,
1750 ADDR2_COMPUTE_DCCINFO_OUTPUT *dcc,
1751 struct gfx9_meta_equation *equation)
1752 {
1753 equation->meta_block_width = dcc->metaBlkWidth;
1754 equation->meta_block_height = dcc->metaBlkHeight;
1755 equation->meta_block_depth = dcc->metaBlkDepth;
1756
1757 if (info->gfx_level >= GFX10) {
1758 /* gfx9_meta_equation doesn't store the first 4 and the last 8 elements. They must be 0. */
1759 for (unsigned i = 0; i < 4; i++)
1760 assert(dcc->equation.gfx10_bits[i] == 0);
1761
1762 for (unsigned i = ARRAY_SIZE(equation->u.gfx10_bits) + 4; i < 68; i++)
1763 assert(dcc->equation.gfx10_bits[i] == 0);
1764
1765 memcpy(equation->u.gfx10_bits, dcc->equation.gfx10_bits + 4,
1766 sizeof(equation->u.gfx10_bits));
1767 } else {
1768 assert(dcc->equation.gfx9.num_bits <= ARRAY_SIZE(equation->u.gfx9.bit));
1769
1770 equation->u.gfx9.num_bits = dcc->equation.gfx9.num_bits;
1771 equation->u.gfx9.num_pipe_bits = dcc->equation.gfx9.numPipeBits;
1772 for (unsigned b = 0; b < ARRAY_SIZE(equation->u.gfx9.bit); b++) {
1773 for (unsigned c = 0; c < ARRAY_SIZE(equation->u.gfx9.bit[b].coord); c++) {
1774 equation->u.gfx9.bit[b].coord[c].dim = dcc->equation.gfx9.bit[b].coord[c].dim;
1775 equation->u.gfx9.bit[b].coord[c].ord = dcc->equation.gfx9.bit[b].coord[c].ord;
1776 }
1777 }
1778 }
1779 }
1780
1781 static void ac_copy_cmask_equation(const struct radeon_info *info,
1782 ADDR2_COMPUTE_CMASK_INFO_OUTPUT *cmask,
1783 struct gfx9_meta_equation *equation)
1784 {
1785 assert(info->gfx_level < GFX11);
1786
1787 equation->meta_block_width = cmask->metaBlkWidth;
1788 equation->meta_block_height = cmask->metaBlkHeight;
1789 equation->meta_block_depth = 1;
1790
1791 if (info->gfx_level == GFX9) {
1792 assert(cmask->equation.gfx9.num_bits <= ARRAY_SIZE(equation->u.gfx9.bit));
1793
1794 equation->u.gfx9.num_bits = cmask->equation.gfx9.num_bits;
1795 equation->u.gfx9.num_pipe_bits = cmask->equation.gfx9.numPipeBits;
1796 for (unsigned b = 0; b < ARRAY_SIZE(equation->u.gfx9.bit); b++) {
1797 for (unsigned c = 0; c < ARRAY_SIZE(equation->u.gfx9.bit[b].coord); c++) {
1798 equation->u.gfx9.bit[b].coord[c].dim = cmask->equation.gfx9.bit[b].coord[c].dim;
1799 equation->u.gfx9.bit[b].coord[c].ord = cmask->equation.gfx9.bit[b].coord[c].ord;
1800 }
1801 }
1802 }
1803 }
1804
1805 static void ac_copy_htile_equation(const struct radeon_info *info,
1806 ADDR2_COMPUTE_HTILE_INFO_OUTPUT *htile,
1807 struct gfx9_meta_equation *equation)
1808 {
1809 equation->meta_block_width = htile->metaBlkWidth;
1810 equation->meta_block_height = htile->metaBlkHeight;
1811
1812 /* gfx9_meta_equation doesn't store the first 8 and the last 4 elements. They must be 0. */
1813 for (unsigned i = 0; i < 8; i++)
1814 assert(htile->equation.gfx10_bits[i] == 0);
1815
1816 for (unsigned i = ARRAY_SIZE(equation->u.gfx10_bits) + 8; i < 72; i++)
1817 assert(htile->equation.gfx10_bits[i] == 0);
1818
1819 memcpy(equation->u.gfx10_bits, htile->equation.gfx10_bits + 8,
1820 sizeof(equation->u.gfx10_bits));
1821 }
1822
1823 static int gfx9_compute_miptree(struct ac_addrlib *addrlib, const struct radeon_info *info,
1824 const struct ac_surf_config *config, struct radeon_surf *surf,
1825 bool compressed, ADDR2_COMPUTE_SURFACE_INFO_INPUT *in)
1826 {
1827 ADDR2_MIP_INFO mip_info[RADEON_SURF_MAX_LEVELS] = {0};
1828 ADDR2_COMPUTE_SURFACE_INFO_OUTPUT out = {0};
1829 ADDR_E_RETURNCODE ret;
1830
1831 out.size = sizeof(ADDR2_COMPUTE_SURFACE_INFO_OUTPUT);
1832 out.pMipInfo = mip_info;
1833
1834 ret = Addr2ComputeSurfaceInfo(addrlib->handle, in, &out);
1835 if (ret != ADDR_OK)
1836 return ret;
1837
1838 if (in->flags.prt) {
1839 surf->prt_tile_width = out.blockWidth;
1840 surf->prt_tile_height = out.blockHeight;
1841 surf->prt_tile_depth = out.blockSlices;
1842
1843 surf->first_mip_tail_level = out.firstMipIdInTail;
1844
1845 for (unsigned i = 0; i < in->numMipLevels; i++) {
1846 surf->u.gfx9.prt_level_offset[i] = mip_info[i].macroBlockOffset + mip_info[i].mipTailOffset;
1847
1848 if (info->gfx_level >= GFX10)
1849 surf->u.gfx9.prt_level_pitch[i] = mip_info[i].pitch;
1850 else
1851 surf->u.gfx9.prt_level_pitch[i] = out.mipChainPitch;
1852 }
1853 }
1854
1855 if (in->flags.stencil) {
1856 surf->u.gfx9.zs.stencil_swizzle_mode = in->swizzleMode;
1857 surf->u.gfx9.zs.stencil_epitch =
1858 out.epitchIsHeight ? out.mipChainHeight - 1 : out.mipChainPitch - 1;
1859 surf->surf_alignment_log2 = MAX2(surf->surf_alignment_log2, util_logbase2(out.baseAlign));
1860 surf->u.gfx9.zs.stencil_offset = align(surf->surf_size, out.baseAlign);
1861 surf->surf_size = surf->u.gfx9.zs.stencil_offset + out.surfSize;
1862 return 0;
1863 }
1864
1865 surf->u.gfx9.swizzle_mode = in->swizzleMode;
1866 surf->u.gfx9.epitch = out.epitchIsHeight ? out.mipChainHeight - 1 : out.mipChainPitch - 1;
1867
1868 /* CMASK fast clear uses these even if FMASK isn't allocated.
1869 * FMASK only supports the Z swizzle modes, whose numbers are multiples of 4.
1870 */
1871 if (!in->flags.depth) {
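/* Clearing the low 2 bits selects the Z variant of the same block size,
 * e.g. ADDR_SW_64KB_R_X -> ADDR_SW_64KB_Z_X.
 */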
1872 surf->u.gfx9.color.fmask_swizzle_mode = surf->u.gfx9.swizzle_mode & ~0x3;
1873 surf->u.gfx9.color.fmask_epitch = surf->u.gfx9.epitch;
1874 }
1875
1876 surf->u.gfx9.surf_slice_size = out.sliceSize;
1877 surf->u.gfx9.surf_pitch = out.pitch;
1878 surf->u.gfx9.surf_height = out.height;
1879 surf->surf_size = out.surfSize;
1880 surf->surf_alignment_log2 = util_logbase2(out.baseAlign);
1881
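/* Minimum pitch alignment for linear surfaces, converted from bytes
 * (LINEAR_PITCH_ALIGNMENT) to elements and rounded up to a power of two.
 */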
1882 const int linear_alignment =
1883 util_next_power_of_two(LINEAR_PITCH_ALIGNMENT / surf->bpe);
1884
1885 if (!compressed && surf->blk_w > 1 && out.pitch == out.pixelPitch &&
1886 surf->u.gfx9.swizzle_mode == ADDR_SW_LINEAR &&
1887 in->numMipLevels == 1) {
1888 /* Divide surf_pitch (= pitch in pixels) by blk_w to get a
1889 * pitch in elements instead because that's what the hardware needs
1890 * in resource descriptors.
1891 * See the comment in si_descriptors.c.
1892 */
1893 surf->u.gfx9.surf_pitch = align(surf->u.gfx9.surf_pitch / surf->blk_w,
1894 linear_alignment);
1895 surf->u.gfx9.epitch = surf->u.gfx9.surf_pitch - 1;
1896 /* Adjust surf_slice_size and surf_size to reflect the change made to surf_pitch. */
1897 surf->u.gfx9.surf_slice_size = (uint64_t)surf->u.gfx9.surf_pitch * out.height * surf->bpe;
1898 surf->surf_size = surf->u.gfx9.surf_slice_size * in->numSlices;
1899
1900 for (unsigned i = 0; i < in->numMipLevels; i++) {
1901 surf->u.gfx9.offset[i] = mip_info[i].offset;
1902 /* Adjust pitch like we did for surf_pitch */
1903 surf->u.gfx9.pitch[i] = align(mip_info[i].pitch / surf->blk_w,
1904 linear_alignment);
1905 }
1906 surf->u.gfx9.base_mip_width = surf->u.gfx9.surf_pitch;
1907 } else if (in->swizzleMode == ADDR_SW_LINEAR) {
1908 for (unsigned i = 0; i < in->numMipLevels; i++) {
1909 surf->u.gfx9.offset[i] = mip_info[i].offset;
1910 surf->u.gfx9.pitch[i] = mip_info[i].pitch;
1911 }
1912 surf->u.gfx9.base_mip_width = surf->u.gfx9.surf_pitch;
1913 } else {
1914 surf->u.gfx9.base_mip_width = mip_info[0].pitch;
1915 }
1916
1917 surf->u.gfx9.base_mip_height = mip_info[0].height;
1918
1919 if (in->flags.depth) {
1920 assert(in->swizzleMode != ADDR_SW_LINEAR);
1921
1922 if (surf->flags & RADEON_SURF_NO_HTILE)
1923 return 0;
1924
1925 /* HTILE */
1926 ADDR2_COMPUTE_HTILE_INFO_INPUT hin = {0};
1927 ADDR2_COMPUTE_HTILE_INFO_OUTPUT hout = {0};
1928 ADDR2_META_MIP_INFO meta_mip_info[RADEON_SURF_MAX_LEVELS] = {0};
1929
1930 hin.size = sizeof(ADDR2_COMPUTE_HTILE_INFO_INPUT);
1931 hout.size = sizeof(ADDR2_COMPUTE_HTILE_INFO_OUTPUT);
1932 hout.pMipInfo = meta_mip_info;
1933
1934 assert(in->flags.metaPipeUnaligned == 0);
1935 assert(in->flags.metaRbUnaligned == 0);
1936
1937 hin.hTileFlags.pipeAligned = 1;
1938 hin.hTileFlags.rbAligned = 1;
1939 hin.depthFlags = in->flags;
1940 hin.swizzleMode = in->swizzleMode;
1941 hin.unalignedWidth = in->width;
1942 hin.unalignedHeight = in->height;
1943 hin.numSlices = in->numSlices;
1944 hin.numMipLevels = in->numMipLevels;
1945 hin.firstMipIdInTail = out.firstMipIdInTail;
1946
1947 ret = Addr2ComputeHtileInfo(addrlib->handle, &hin, &hout);
1948 if (ret != ADDR_OK)
1949 return ret;
1950
1951 surf->meta_size = hout.htileBytes;
1952 surf->meta_slice_size = hout.sliceSize;
1953 surf->meta_alignment_log2 = util_logbase2(hout.baseAlign);
1954 surf->meta_pitch = hout.pitch;
1955 surf->num_meta_levels = in->numMipLevels;
1956
1957 for (unsigned i = 0; i < in->numMipLevels; i++) {
1958 surf->u.gfx9.meta_levels[i].offset = meta_mip_info[i].offset;
1959 surf->u.gfx9.meta_levels[i].size = meta_mip_info[i].sliceSize;
1960
1961 if (meta_mip_info[i].inMiptail) {
1962 /* GFX10 can only compress the first level
1963 * in the mip tail.
1964 */
1965 surf->num_meta_levels = i + 1;
1966 break;
1967 }
1968 }
1969
1970 if (!surf->num_meta_levels)
1971 surf->meta_size = 0;
1972
1973 if (info->gfx_level >= GFX10)
1974 ac_copy_htile_equation(info, &hout, &surf->u.gfx9.zs.htile_equation);
1975 return 0;
1976 }
1977
1978 {
1979 /* Compute tile swizzle for the color surface.
1980 * All *_X and *_T modes can use the swizzle.
1981 */
1982 if (config->info.surf_index && in->swizzleMode >= ADDR_SW_64KB_Z_T && !out.mipChainInTail &&
1983 !(surf->flags & RADEON_SURF_SHAREABLE) && !in->flags.display) {
1984 ADDR2_COMPUTE_PIPEBANKXOR_INPUT xin = {0};
1985 ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT xout = {0};
1986
1987 xin.size = sizeof(ADDR2_COMPUTE_PIPEBANKXOR_INPUT);
1988 xout.size = sizeof(ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT);
1989
1990 xin.surfIndex = p_atomic_inc_return(config->info.surf_index) - 1;
1991 xin.flags = in->flags;
1992 xin.swizzleMode = in->swizzleMode;
1993 xin.resourceType = in->resourceType;
1994 xin.format = in->format;
1995 xin.numSamples = in->numSamples;
1996 xin.numFrags = in->numFrags;
1997
1998 ret = Addr2ComputePipeBankXor(addrlib->handle, &xin, &xout);
1999 if (ret != ADDR_OK)
2000 return ret;
2001
2002 assert(xout.pipeBankXor <= u_bit_consecutive(0, sizeof(surf->tile_swizzle) * 8));
2003 surf->tile_swizzle = xout.pipeBankXor;
2004
2005 /* Gfx11 should shift it by 10 bits instead of 8, and drivers already shift it by 8 bits,
2006 * so shift it by 2 bits here.
2007 */
2008 if (info->gfx_level >= GFX11)
2009 surf->tile_swizzle <<= 2;
2010 }
2011
2012 bool use_dcc = false;
2013 if (surf->modifier != DRM_FORMAT_MOD_INVALID) {
2014 use_dcc = ac_modifier_has_dcc(surf->modifier);
2015 } else {
2016 use_dcc = info->has_graphics && !(surf->flags & RADEON_SURF_DISABLE_DCC) && !compressed &&
2017 is_dcc_supported_by_CB(info, in->swizzleMode) &&
2018 (!in->flags.display ||
2019 gfx9_is_dcc_supported_by_DCN(info, config, surf, !in->flags.metaRbUnaligned,
2020 !in->flags.metaPipeUnaligned));
2021 }
2022
2023 /* DCC */
2024 if (use_dcc) {
2025 ADDR2_COMPUTE_DCCINFO_INPUT din = {0};
2026 ADDR2_COMPUTE_DCCINFO_OUTPUT dout = {0};
2027 ADDR2_META_MIP_INFO meta_mip_info[RADEON_SURF_MAX_LEVELS] = {0};
2028
2029 din.size = sizeof(ADDR2_COMPUTE_DCCINFO_INPUT);
2030 dout.size = sizeof(ADDR2_COMPUTE_DCCINFO_OUTPUT);
2031 dout.pMipInfo = meta_mip_info;
2032
2033 din.dccKeyFlags.pipeAligned = !in->flags.metaPipeUnaligned;
2034 din.dccKeyFlags.rbAligned = !in->flags.metaRbUnaligned;
2035 din.resourceType = in->resourceType;
2036 din.swizzleMode = in->swizzleMode;
2037 din.bpp = in->bpp;
2038 din.unalignedWidth = in->width;
2039 din.unalignedHeight = in->height;
2040 din.numSlices = in->numSlices;
2041 din.numFrags = in->numFrags;
2042 din.numMipLevels = in->numMipLevels;
2043 din.dataSurfaceSize = out.surfSize;
2044 din.firstMipIdInTail = out.firstMipIdInTail;
2045
2046 if (info->gfx_level == GFX9)
2047 simple_mtx_lock(&addrlib->lock);
2048 ret = Addr2ComputeDccInfo(addrlib->handle, &din, &dout);
2049 if (info->gfx_level == GFX9)
2050 simple_mtx_unlock(&addrlib->lock);
2051
2052 if (ret != ADDR_OK)
2053 return ret;
2054
2055 surf->u.gfx9.color.dcc.rb_aligned = din.dccKeyFlags.rbAligned;
2056 surf->u.gfx9.color.dcc.pipe_aligned = din.dccKeyFlags.pipeAligned;
2057 surf->u.gfx9.color.dcc_block_width = dout.compressBlkWidth;
2058 surf->u.gfx9.color.dcc_block_height = dout.compressBlkHeight;
2059 surf->u.gfx9.color.dcc_block_depth = dout.compressBlkDepth;
2060 surf->u.gfx9.color.dcc_pitch_max = dout.pitch - 1;
2061 surf->u.gfx9.color.dcc_height = dout.height;
2062 surf->meta_size = dout.dccRamSize;
2063 surf->meta_slice_size = dout.dccRamSliceSize;
2064 surf->meta_alignment_log2 = util_logbase2(dout.dccRamBaseAlign);
2065 surf->num_meta_levels = in->numMipLevels;
2066
2067 /* Disable DCC for levels that are in the mip tail.
2068 *
2069 * There are two issues that this is intended to
2070 * address:
2071 *
2072 * 1. Multiple mip levels may share a cache line. This
2073 * can lead to corruption when switching between
2074 * rendering to different mip levels because the
2075 * RBs don't maintain coherency.
2076 *
2077 * 2. Texturing with metadata after rendering sometimes
2078 * fails with corruption, probably for a similar
2079 * reason.
2080 *
2081 * Working around these issues for all levels in the
2082 * mip tail may be overly conservative, but it's what
2083 * Vulkan does.
2084 *
2085 * Alternative solutions that also work but are worse:
2086 * - Disable DCC entirely.
2087 * - Flush TC L2 after rendering.
2088 */
2089 for (unsigned i = 0; i < in->numMipLevels; i++) {
2090 surf->u.gfx9.meta_levels[i].offset = meta_mip_info[i].offset;
2091 surf->u.gfx9.meta_levels[i].size = meta_mip_info[i].sliceSize;
2092
2093 if (meta_mip_info[i].inMiptail) {
2094 /* GFX10 can only compress the first level
2095 * in the mip tail.
2096 *
2097 * TODO: Try to do the same thing for gfx9
2098 * if there are no regressions.
2099 */
2100 if (info->gfx_level >= GFX10)
2101 surf->num_meta_levels = i + 1;
2102 else
2103 surf->num_meta_levels = i;
2104 break;
2105 }
2106 }
2107
2108 if (!surf->num_meta_levels)
2109 surf->meta_size = 0;
2110
2111 surf->u.gfx9.color.display_dcc_size = surf->meta_size;
2112 surf->u.gfx9.color.display_dcc_alignment_log2 = surf->meta_alignment_log2;
2113 surf->u.gfx9.color.display_dcc_pitch_max = surf->u.gfx9.color.dcc_pitch_max;
2114 surf->u.gfx9.color.display_dcc_height = surf->u.gfx9.color.dcc_height;
2115
2116 if (in->resourceType == ADDR_RSRC_TEX_2D)
2117 ac_copy_dcc_equation(info, &dout, &surf->u.gfx9.color.dcc_equation);
2118
2119 /* Compute displayable DCC. */
2120 if (((in->flags.display && info->use_display_dcc_with_retile_blit) ||
2121 ac_modifier_has_dcc_retile(surf->modifier)) && surf->num_meta_levels) {
2122 /* Compute displayable DCC info. */
2123 din.dccKeyFlags.pipeAligned = 0;
2124 din.dccKeyFlags.rbAligned = 0;
2125
2126 assert(din.numSlices == 1);
2127 assert(din.numMipLevels == 1);
2128 assert(din.numFrags == 1);
2129 assert(surf->tile_swizzle == 0);
2130 assert(surf->u.gfx9.color.dcc.pipe_aligned || surf->u.gfx9.color.dcc.rb_aligned);
2131
2132 if (info->gfx_level == GFX9)
2133 simple_mtx_lock(&addrlib->lock);
2134 ret = Addr2ComputeDccInfo(addrlib->handle, &din, &dout);
2135 if (info->gfx_level == GFX9)
2136 simple_mtx_unlock(&addrlib->lock);
2137
2138 if (ret != ADDR_OK)
2139 return ret;
2140
2141 surf->u.gfx9.color.display_dcc_size = dout.dccRamSize;
2142 surf->u.gfx9.color.display_dcc_alignment_log2 = util_logbase2(dout.dccRamBaseAlign);
2143 surf->u.gfx9.color.display_dcc_pitch_max = dout.pitch - 1;
2144 surf->u.gfx9.color.display_dcc_height = dout.height;
2145 assert(surf->u.gfx9.color.display_dcc_size <= surf->meta_size);
2146
2147 ac_copy_dcc_equation(info, &dout, &surf->u.gfx9.color.display_dcc_equation);
2148 surf->u.gfx9.color.dcc.display_equation_valid = true;
2149 }
2150 }
2151
2152 /* FMASK (it doesn't exist on GFX11) */
2153 if (info->gfx_level <= GFX10_3 && info->has_graphics &&
2154 in->numSamples > 1 && !(surf->flags & RADEON_SURF_NO_FMASK)) {
2155 ADDR2_COMPUTE_FMASK_INFO_INPUT fin = {0};
2156 ADDR2_COMPUTE_FMASK_INFO_OUTPUT fout = {0};
2157
2158 fin.size = sizeof(ADDR2_COMPUTE_FMASK_INFO_INPUT);
2159 fout.size = sizeof(ADDR2_COMPUTE_FMASK_INFO_OUTPUT);
2160
2161 ret = gfx9_get_preferred_swizzle_mode(addrlib->handle, info, surf, in, true, &fin.swizzleMode);
2162 if (ret != ADDR_OK)
2163 return ret;
2164
2165 fin.unalignedWidth = in->width;
2166 fin.unalignedHeight = in->height;
2167 fin.numSlices = in->numSlices;
2168 fin.numSamples = in->numSamples;
2169 fin.numFrags = in->numFrags;
2170
2171 ret = Addr2ComputeFmaskInfo(addrlib->handle, &fin, &fout);
2172 if (ret != ADDR_OK)
2173 return ret;
2174
2175 surf->u.gfx9.color.fmask_swizzle_mode = fin.swizzleMode;
2176 surf->u.gfx9.color.fmask_epitch = fout.pitch - 1;
2177 surf->fmask_size = fout.fmaskBytes;
2178 surf->fmask_alignment_log2 = util_logbase2(fout.baseAlign);
2179 surf->fmask_slice_size = fout.sliceSize;
2180
2181 /* Compute tile swizzle for the FMASK surface. */
2182 if (config->info.fmask_surf_index && fin.swizzleMode >= ADDR_SW_64KB_Z_T &&
2183 !(surf->flags & RADEON_SURF_SHAREABLE)) {
2184 ADDR2_COMPUTE_PIPEBANKXOR_INPUT xin = {0};
2185 ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT xout = {0};
2186
2187 xin.size = sizeof(ADDR2_COMPUTE_PIPEBANKXOR_INPUT);
2188 xout.size = sizeof(ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT);
2189
2190 /* This counter starts from 1 instead of 0. */
2191 xin.surfIndex = p_atomic_inc_return(config->info.fmask_surf_index);
2192 xin.flags = in->flags;
2193 xin.swizzleMode = fin.swizzleMode;
2194 xin.resourceType = in->resourceType;
2195 xin.format = in->format;
2196 xin.numSamples = in->numSamples;
2197 xin.numFrags = in->numFrags;
2198
2199 ret = Addr2ComputePipeBankXor(addrlib->handle, &xin, &xout);
2200 if (ret != ADDR_OK)
2201 return ret;
2202
2203 assert(xout.pipeBankXor <= u_bit_consecutive(0, sizeof(surf->fmask_tile_swizzle) * 8));
2204 surf->fmask_tile_swizzle = xout.pipeBankXor;
2205 }
2206 }
2207
2208 /* CMASK -- on GFX10 only for FMASK (and it doesn't exist on GFX11) */
2209 if (info->gfx_level <= GFX10_3 && info->has_graphics &&
2210 in->swizzleMode != ADDR_SW_LINEAR && in->resourceType == ADDR_RSRC_TEX_2D &&
2211 ((info->gfx_level <= GFX9 && in->numSamples == 1 && in->flags.metaPipeUnaligned == 0 &&
2212 in->flags.metaRbUnaligned == 0) ||
2213 (surf->fmask_size && in->numSamples >= 2))) {
2214 ADDR2_COMPUTE_CMASK_INFO_INPUT cin = {0};
2215 ADDR2_COMPUTE_CMASK_INFO_OUTPUT cout = {0};
2216 ADDR2_META_MIP_INFO meta_mip_info[RADEON_SURF_MAX_LEVELS] = {0};
2217
2218 cin.size = sizeof(ADDR2_COMPUTE_CMASK_INFO_INPUT);
2219 cout.size = sizeof(ADDR2_COMPUTE_CMASK_INFO_OUTPUT);
2220 cout.pMipInfo = meta_mip_info;
2221
2222 assert(in->flags.metaPipeUnaligned == 0);
2223 assert(in->flags.metaRbUnaligned == 0);
2224
2225 cin.cMaskFlags.pipeAligned = 1;
2226 cin.cMaskFlags.rbAligned = 1;
2227 cin.resourceType = in->resourceType;
2228 cin.unalignedWidth = in->width;
2229 cin.unalignedHeight = in->height;
2230 cin.numSlices = in->numSlices;
2231 cin.numMipLevels = in->numMipLevels;
2232 cin.firstMipIdInTail = out.firstMipIdInTail;
2233
2234 if (in->numSamples > 1)
2235 cin.swizzleMode = surf->u.gfx9.color.fmask_swizzle_mode;
2236 else
2237 cin.swizzleMode = in->swizzleMode;
2238
2239 if (info->gfx_level == GFX9)
2240 simple_mtx_lock(&addrlib->lock);
2241 ret = Addr2ComputeCmaskInfo(addrlib->handle, &cin, &cout);
2242 if (info->gfx_level == GFX9)
2243 simple_mtx_unlock(&addrlib->lock);
2244
2245 if (ret != ADDR_OK)
2246 return ret;
2247
2248 surf->cmask_size = cout.cmaskBytes;
2249 surf->cmask_alignment_log2 = util_logbase2(cout.baseAlign);
2250 surf->cmask_slice_size = cout.sliceSize;
2251 surf->cmask_pitch = cout.pitch;
2252 surf->cmask_height = cout.height;
2253 surf->u.gfx9.color.cmask_level0.offset = meta_mip_info[0].offset;
2254 surf->u.gfx9.color.cmask_level0.size = meta_mip_info[0].sliceSize;
2255
2256 ac_copy_cmask_equation(info, &cout, &surf->u.gfx9.color.cmask_equation);
2257 }
2258 }
2259
2260 return 0;
2261 }
2262
2263 static int gfx9_compute_surface(struct ac_addrlib *addrlib, const struct radeon_info *info,
2264 const struct ac_surf_config *config, enum radeon_surf_mode mode,
2265 struct radeon_surf *surf)
2266 {
2267 bool compressed;
2268 ADDR2_COMPUTE_SURFACE_INFO_INPUT AddrSurfInfoIn = {0};
2269 int r;
2270
2271 AddrSurfInfoIn.size = sizeof(ADDR2_COMPUTE_SURFACE_INFO_INPUT);
2272
2273 compressed = surf->blk_w == 4 && surf->blk_h == 4;
2274
2275 AddrSurfInfoIn.format = bpe_to_format(surf);
2276 if (!compressed)
2277 AddrSurfInfoIn.bpp = surf->bpe * 8;
2278
2279 bool is_color_surface = !(surf->flags & RADEON_SURF_Z_OR_SBUFFER);
2280 AddrSurfInfoIn.flags.color = is_color_surface && !(surf->flags & RADEON_SURF_NO_RENDER_TARGET);
2281 AddrSurfInfoIn.flags.depth = (surf->flags & RADEON_SURF_ZBUFFER) != 0;
2282 AddrSurfInfoIn.flags.display = get_display_flag(config, surf);
2283 /* flags.texture currently refers to TC-compatible HTILE */
2284 AddrSurfInfoIn.flags.texture = (is_color_surface && !(surf->flags & RADEON_SURF_NO_TEXTURE)) ||
2285 (surf->flags & RADEON_SURF_TC_COMPATIBLE_HTILE);
2286 AddrSurfInfoIn.flags.opt4space = 1;
2287 AddrSurfInfoIn.flags.prt = (surf->flags & RADEON_SURF_PRT) != 0;
2288
2289 AddrSurfInfoIn.numMipLevels = config->info.levels;
2290 AddrSurfInfoIn.numSamples = MAX2(1, config->info.samples);
2291 AddrSurfInfoIn.numFrags = AddrSurfInfoIn.numSamples;
2292
2293 if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER))
2294 AddrSurfInfoIn.numFrags = MAX2(1, config->info.storage_samples);
2295
2296 /* GFX9 doesn't support 1D depth textures, so allocate all 1D textures
2297 * as 2D. This avoids shader variants for 1D vs 2D; all shaders must
2298 * sample 1D textures as 2D. */
2299 if (config->is_3d)
2300 AddrSurfInfoIn.resourceType = ADDR_RSRC_TEX_3D;
2301 else if (info->gfx_level != GFX9 && config->is_1d)
2302 AddrSurfInfoIn.resourceType = ADDR_RSRC_TEX_1D;
2303 else
2304 AddrSurfInfoIn.resourceType = ADDR_RSRC_TEX_2D;
2305
2306 AddrSurfInfoIn.width = config->info.width;
2307 AddrSurfInfoIn.height = config->info.height;
2308
2309 if (config->is_3d)
2310 AddrSurfInfoIn.numSlices = config->info.depth;
2311 else if (config->is_cube)
2312 AddrSurfInfoIn.numSlices = 6;
2313 else
2314 AddrSurfInfoIn.numSlices = config->info.array_size;
2315
2316 /* This is propagated to DCC. It must be 0 for HTILE and CMASK. */
2317 AddrSurfInfoIn.flags.metaPipeUnaligned = 0;
2318 AddrSurfInfoIn.flags.metaRbUnaligned = 0;
2319
2320 if (ac_modifier_has_dcc(surf->modifier)) {
2321 ac_modifier_fill_dcc_params(surf->modifier, surf, &AddrSurfInfoIn);
2322 } else if (!AddrSurfInfoIn.flags.depth && !AddrSurfInfoIn.flags.stencil) {
2323 /* Optimal values for the L2 cache. */
2324 /* Don't change the DCC settings for imported buffers - they might differ. */
2325 if (!(surf->flags & RADEON_SURF_IMPORTED)) {
2326 if (info->gfx_level >= GFX11_5) {
2327 surf->u.gfx9.color.dcc.independent_64B_blocks = 0;
2328 surf->u.gfx9.color.dcc.independent_128B_blocks = 1;
2329 surf->u.gfx9.color.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_256B;
2330 } else if (info->gfx_level >= GFX10) {
2331 surf->u.gfx9.color.dcc.independent_64B_blocks = 0;
2332 surf->u.gfx9.color.dcc.independent_128B_blocks = 1;
2333 surf->u.gfx9.color.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_128B;
2334 } else if (info->gfx_level == GFX9) {
2335 surf->u.gfx9.color.dcc.independent_64B_blocks = 1;
2336 surf->u.gfx9.color.dcc.independent_128B_blocks = 0;
2337 surf->u.gfx9.color.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
2338 }
2339 }
2340
2341 if (AddrSurfInfoIn.flags.display) {
2342 /* The display hardware can only read DCC with RB_ALIGNED=0 and
2343 * PIPE_ALIGNED=0. PIPE_ALIGNED really means L2CACHE_ALIGNED.
2344 *
2345 * The CB block requires RB_ALIGNED=1 except on chips with a single RB.
2346 * PIPE_ALIGNED is optional, but PIPE_ALIGNED=0 requires L2 flushes
2347 * after rendering, so PIPE_ALIGNED=1 is recommended.
2348 */
2349 if (info->use_display_dcc_unaligned) {
2350 AddrSurfInfoIn.flags.metaPipeUnaligned = 1;
2351 AddrSurfInfoIn.flags.metaRbUnaligned = 1;
2352 }
2353
2354 /* Adjust DCC settings to meet DCN requirements. */
2355 /* Don't change the DCC settings for imported buffers - they might differ. */
2356 if (!(surf->flags & RADEON_SURF_IMPORTED) &&
2357 (info->use_display_dcc_unaligned || info->use_display_dcc_with_retile_blit)) {
2358 // TODO: clarify DCN support with the DAL team for gfx11.5
2359
2360 /* Only Navi12/14 support independent 64B blocks in L2,
2361 * but without DCC image stores.
2362 */
2363 if (info->family == CHIP_NAVI12 || info->family == CHIP_NAVI14) {
2364 surf->u.gfx9.color.dcc.independent_64B_blocks = 1;
2365 surf->u.gfx9.color.dcc.independent_128B_blocks = 0;
2366 surf->u.gfx9.color.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
2367 }
2368
2369 if ((info->gfx_level >= GFX10_3 && info->family <= CHIP_REMBRANDT) ||
2370 /* Newer chips will skip this when possible to get better performance.
2371 * This is also possible for other gfx10.3 chips, but is disabled for
2372 * interoperability between different Mesa versions.
2373 */
2374 (info->family > CHIP_REMBRANDT &&
2375 gfx10_DCN_requires_independent_64B_blocks(info, config))) {
2376 surf->u.gfx9.color.dcc.independent_64B_blocks = 1;
2377 surf->u.gfx9.color.dcc.independent_128B_blocks = 1;
2378 surf->u.gfx9.color.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
2379 }
2380 }
2381 }
2382 }
2383
2384 if (surf->modifier == DRM_FORMAT_MOD_INVALID) {
2385 switch (mode) {
2386 case RADEON_SURF_MODE_LINEAR_ALIGNED:
2387 assert(config->info.samples <= 1);
2388 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
2389 AddrSurfInfoIn.swizzleMode = ADDR_SW_LINEAR;
2390 break;
2391
2392 case RADEON_SURF_MODE_1D:
2393 case RADEON_SURF_MODE_2D:
2394 if (surf->flags & RADEON_SURF_IMPORTED ||
2395 (info->gfx_level >= GFX10 && surf->flags & RADEON_SURF_FORCE_SWIZZLE_MODE)) {
2396 AddrSurfInfoIn.swizzleMode = surf->u.gfx9.swizzle_mode;
2397 break;
2398 }
2399
2400 /* On GFX11, the only allowed swizzle mode for VRS rate images is
2401 * 64KB_R_X.
2402 */
2403 if (info->gfx_level >= GFX11 && surf->flags & RADEON_SURF_VRS_RATE) {
2404 AddrSurfInfoIn.swizzleMode = ADDR_SW_64KB_R_X;
2405 break;
2406 }
2407
2408 r = gfx9_get_preferred_swizzle_mode(addrlib->handle, info, surf, &AddrSurfInfoIn, false,
2409 &AddrSurfInfoIn.swizzleMode);
2410 if (r)
2411 return r;
2412 break;
2413
2414 default:
2415 assert(0);
2416 }
2417 } else {
2418 /* We have a valid and required modifier here. */
2419
2420 assert(!compressed);
2421 assert(!ac_modifier_has_dcc(surf->modifier) ||
2422 !(surf->flags & RADEON_SURF_DISABLE_DCC));
2423
2424 AddrSurfInfoIn.swizzleMode = ac_get_modifier_swizzle_mode(info->gfx_level, surf->modifier);
2425 }
2426
2427 surf->u.gfx9.resource_type = (enum gfx9_resource_type)AddrSurfInfoIn.resourceType;
2428 surf->has_stencil = !!(surf->flags & RADEON_SURF_SBUFFER);
2429
2430 surf->num_meta_levels = 0;
2431 surf->surf_size = 0;
2432 surf->fmask_size = 0;
2433 surf->meta_size = 0;
2434 surf->meta_slice_size = 0;
2435 surf->u.gfx9.surf_offset = 0;
2436 if (AddrSurfInfoIn.flags.stencil)
2437 surf->u.gfx9.zs.stencil_offset = 0;
2438 surf->cmask_size = 0;
2439
2440 const bool only_stencil =
2441 (surf->flags & RADEON_SURF_SBUFFER) && !(surf->flags & RADEON_SURF_ZBUFFER);
2442
2443 /* Calculate texture layout information. */
2444 if (!only_stencil) {
2445 r = gfx9_compute_miptree(addrlib, info, config, surf, compressed, &AddrSurfInfoIn);
2446 if (r)
2447 return r;
2448 }
2449
2450 /* Calculate texture layout information for stencil. */
2451 if (surf->flags & RADEON_SURF_SBUFFER) {
2452 AddrSurfInfoIn.flags.stencil = 1;
2453 AddrSurfInfoIn.bpp = 8;
2454 AddrSurfInfoIn.format = ADDR_FMT_8;
2455
2456 if (!AddrSurfInfoIn.flags.depth) {
2457 r = gfx9_get_preferred_swizzle_mode(addrlib->handle, info, surf, &AddrSurfInfoIn, false,
2458 &AddrSurfInfoIn.swizzleMode);
2459 if (r)
2460 return r;
2461 } else
2462 AddrSurfInfoIn.flags.depth = 0;
2463
2464 r = gfx9_compute_miptree(addrlib, info, config, surf, compressed, &AddrSurfInfoIn);
2465 if (r)
2466 return r;
2467 }
2468
2469 surf->is_linear = (only_stencil ? surf->u.gfx9.zs.stencil_swizzle_mode :
2470 surf->u.gfx9.swizzle_mode) == ADDR_SW_LINEAR;
2471
2472 /* Query whether the surface is displayable. */
2473 /* This is only useful for surfaces that are allocated without SCANOUT. */
2474 BOOL_32 displayable = false;
2475 if (!config->is_3d && !config->is_cube) {
2476 r = Addr2IsValidDisplaySwizzleMode(addrlib->handle, surf->u.gfx9.swizzle_mode,
2477 surf->bpe * 8, &displayable);
2478 if (r)
2479 return r;
2480
2481 /* Display needs unaligned DCC. */
2482 if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER) &&
2483 surf->num_meta_levels &&
2484 (!gfx9_is_dcc_supported_by_DCN(info, config, surf, surf->u.gfx9.color.dcc.rb_aligned,
2485 surf->u.gfx9.color.dcc.pipe_aligned) ||
2486 /* Don't set is_displayable if displayable DCC is missing. */
2487 (info->use_display_dcc_with_retile_blit && !surf->u.gfx9.color.dcc.display_equation_valid)))
2488 displayable = false;
2489 }
2490 surf->is_displayable = displayable;
2491
2492 /* Validate that we allocated a displayable surface if requested. */
2493 assert(!AddrSurfInfoIn.flags.display || surf->is_displayable);
2494
2495 /* Validate that DCC is set up correctly. */
2496 if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER) && surf->num_meta_levels) {
2497 assert(is_dcc_supported_by_L2(info, surf));
2498 if (AddrSurfInfoIn.flags.color)
2499 assert(is_dcc_supported_by_CB(info, surf->u.gfx9.swizzle_mode));
2500 if (AddrSurfInfoIn.flags.display && surf->modifier == DRM_FORMAT_MOD_INVALID) {
2501 assert(gfx9_is_dcc_supported_by_DCN(info, config, surf, surf->u.gfx9.color.dcc.rb_aligned,
2502 surf->u.gfx9.color.dcc.pipe_aligned));
2503 }
2504 }
2505
2506 if (info->has_graphics && !compressed && !config->is_3d && config->info.levels == 1 &&
2507 AddrSurfInfoIn.flags.color && !surf->is_linear &&
2508 (1 << surf->surf_alignment_log2) >= 64 * 1024 && /* 64KB tiling */
2509 !(surf->flags & (RADEON_SURF_DISABLE_DCC | RADEON_SURF_FORCE_SWIZZLE_MODE |
2510 RADEON_SURF_FORCE_MICRO_TILE_MODE)) &&
2511 surf->modifier == DRM_FORMAT_MOD_INVALID &&
2512 gfx9_is_dcc_supported_by_DCN(info, config, surf, surf->u.gfx9.color.dcc.rb_aligned,
2513 surf->u.gfx9.color.dcc.pipe_aligned)) {
2514 /* Validate that DCC is enabled if DCN can do it. */
2515 if ((info->use_display_dcc_unaligned || info->use_display_dcc_with_retile_blit) &&
2516 AddrSurfInfoIn.flags.display && surf->bpe == 4) {
2517 assert(surf->num_meta_levels);
2518 }
2519
2520 /* Validate that non-scanout DCC is always enabled. */
2521 if (!AddrSurfInfoIn.flags.display)
2522 assert(surf->num_meta_levels);
2523 }
2524
2525 if (!surf->meta_size) {
2526 /* Unset this if HTILE is not present. */
2527 surf->flags &= ~RADEON_SURF_TC_COMPATIBLE_HTILE;
2528 }
2529
2530 if (surf->modifier != DRM_FORMAT_MOD_INVALID) {
2531 assert((surf->num_meta_levels != 0) == ac_modifier_has_dcc(surf->modifier));
2532 }
2533
2534 switch (surf->u.gfx9.swizzle_mode) {
2535 /* S = standard. */
2536 case ADDR_SW_256B_S:
2537 case ADDR_SW_4KB_S:
2538 case ADDR_SW_64KB_S:
2539 case ADDR_SW_64KB_S_T:
2540 case ADDR_SW_4KB_S_X:
2541 case ADDR_SW_64KB_S_X:
2542 case ADDR_SW_256KB_S_X:
2543 surf->micro_tile_mode = RADEON_MICRO_MODE_STANDARD;
2544 break;
2545
2546 /* D = display. */
2547 case ADDR_SW_LINEAR:
2548 case ADDR_SW_256B_D:
2549 case ADDR_SW_4KB_D:
2550 case ADDR_SW_64KB_D:
2551 case ADDR_SW_64KB_D_T:
2552 case ADDR_SW_4KB_D_X:
2553 case ADDR_SW_64KB_D_X:
2554 case ADDR_SW_256KB_D_X:
2555 surf->micro_tile_mode = RADEON_MICRO_MODE_DISPLAY;
2556 break;
2557
2558 /* R = rotated (gfx9), render target (gfx10). */
2559 case ADDR_SW_256B_R:
2560 case ADDR_SW_4KB_R:
2561 case ADDR_SW_64KB_R:
2562 case ADDR_SW_64KB_R_T:
2563 case ADDR_SW_4KB_R_X:
2564 case ADDR_SW_64KB_R_X:
2565 case ADDR_SW_256KB_R_X:
2566 /* The rotated micro tile mode doesn't work if both CMASK and RB+ are
2567 * used at the same time. We currently do not use rotated
2568 * in gfx9.
2569 */
2570 assert(info->gfx_level >= GFX10 || !"rotated micro tile mode is unsupported");
2571 surf->micro_tile_mode = RADEON_MICRO_MODE_RENDER;
2572 break;
2573
2574 /* Z = depth. */
2575 case ADDR_SW_4KB_Z:
2576 case ADDR_SW_64KB_Z:
2577 case ADDR_SW_64KB_Z_T:
2578 case ADDR_SW_4KB_Z_X:
2579 case ADDR_SW_64KB_Z_X:
2580 case ADDR_SW_256KB_Z_X:
2581 surf->micro_tile_mode = RADEON_MICRO_MODE_DEPTH;
2582 break;
2583
2584 default:
2585 assert(0);
2586 }
2587
2588 return 0;
2589 }
2590
2591 int ac_compute_surface(struct ac_addrlib *addrlib, const struct radeon_info *info,
2592 const struct ac_surf_config *config, enum radeon_surf_mode mode,
2593 struct radeon_surf *surf)
2594 {
2595 int r;
2596
2597 r = surf_config_sanity(config, surf->flags);
2598 if (r)
2599 return r;
2600
2601 /* Images are emulated on some CDNA chips. */
2602 if (!info->has_image_opcodes)
2603 mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
2604
2605 /* 0 offsets mean disabled. */
2606 surf->meta_offset = surf->fmask_offset = surf->cmask_offset = surf->display_dcc_offset = 0;
2607
2608 if (info->family_id >= FAMILY_AI)
2609 r = gfx9_compute_surface(addrlib, info, config, mode, surf);
2610 else
2611 r = gfx6_compute_surface(addrlib->handle, info, config, mode, surf);
2612
2613 if (r)
2614 return r;
2615
2616 /* Determine the memory layout of multiple allocations in one buffer. */
2617 surf->total_size = surf->surf_size;
2618 surf->alignment_log2 = surf->surf_alignment_log2;
2619
2620 if (surf->fmask_size) {
2621 assert(config->info.samples >= 2);
2622 surf->fmask_offset = align64(surf->total_size, 1ull << surf->fmask_alignment_log2);
2623 surf->total_size = surf->fmask_offset + surf->fmask_size;
2624 surf->alignment_log2 = MAX2(surf->alignment_log2, surf->fmask_alignment_log2);
2625 }
2626
2627 /* Single-sample CMASK is in a separate buffer. */
2628 if (surf->cmask_size && config->info.samples >= 2) {
2629 surf->cmask_offset = align64(surf->total_size, 1ull << surf->cmask_alignment_log2);
2630 surf->total_size = surf->cmask_offset + surf->cmask_size;
2631 surf->alignment_log2 = MAX2(surf->alignment_log2, surf->cmask_alignment_log2);
2632 }
2633
2634 if (surf->is_displayable)
2635 surf->flags |= RADEON_SURF_SCANOUT;
2636
2637 if (surf->meta_size &&
2638 /* On GFX8 and older, DCC can't be used for displayable surfaces, so skip the allocation there; on GFX9+ meta_size already accounts for displayability. */
2639 (info->gfx_level >= GFX9 || !get_display_flag(config, surf))) {
2640 /* It's better when displayable DCC is immediately after
2641 * the image due to hw-specific reasons.
2642 */
2643 if (info->gfx_level >= GFX9 &&
2644 !(surf->flags & RADEON_SURF_Z_OR_SBUFFER) &&
2645 surf->u.gfx9.color.dcc.display_equation_valid) {
2646 /* Add space for the displayable DCC buffer. */
2647 surf->display_dcc_offset = align64(surf->total_size, 1ull << surf->u.gfx9.color.display_dcc_alignment_log2);
2648 surf->total_size = surf->display_dcc_offset + surf->u.gfx9.color.display_dcc_size;
2649 }
2650
2651 surf->meta_offset = align64(surf->total_size, 1ull << surf->meta_alignment_log2);
2652 surf->total_size = surf->meta_offset + surf->meta_size;
2653 surf->alignment_log2 = MAX2(surf->alignment_log2, surf->meta_alignment_log2);
2654 }
2655
2656 return 0;
2657 }
2658
2659 /* This is meant to be used for disabling DCC. */
2660 void ac_surface_zero_dcc_fields(struct radeon_surf *surf)
2661 {
2662 if (surf->flags & RADEON_SURF_Z_OR_SBUFFER)
2663 return;
2664
2665 surf->meta_offset = 0;
2666 surf->display_dcc_offset = 0;
2667 if (!surf->fmask_offset && !surf->cmask_offset) {
2668 surf->total_size = surf->surf_size;
2669 surf->alignment_log2 = surf->surf_alignment_log2;
2670 }
2671 }
2672
2673 static unsigned eg_tile_split(unsigned tile_split)
2674 {
2675 switch (tile_split) {
2676 case 0:
2677 tile_split = 64;
2678 break;
2679 case 1:
2680 tile_split = 128;
2681 break;
2682 case 2:
2683 tile_split = 256;
2684 break;
2685 case 3:
2686 tile_split = 512;
2687 break;
2688 default:
2689 case 4:
2690 tile_split = 1024;
2691 break;
2692 case 5:
2693 tile_split = 2048;
2694 break;
2695 case 6:
2696 tile_split = 4096;
2697 break;
2698 }
2699 return tile_split;
2700 }
2701
2702 static unsigned eg_tile_split_rev(unsigned eg_tile_split)
2703 {
2704 switch (eg_tile_split) {
2705 case 64:
2706 return 0;
2707 case 128:
2708 return 1;
2709 case 256:
2710 return 2;
2711 case 512:
2712 return 3;
2713 default:
2714 case 1024:
2715 return 4;
2716 case 2048:
2717 return 5;
2718 case 4096:
2719 return 6;
2720 }
2721 }
2722
2723 #define AMDGPU_TILING_DCC_MAX_COMPRESSED_BLOCK_SIZE_SHIFT 45
2724 #define AMDGPU_TILING_DCC_MAX_COMPRESSED_BLOCK_SIZE_MASK 0x3
2725
2726 /* This should be called before ac_compute_surface. */
2727 void ac_surface_apply_bo_metadata(const struct radeon_info *info, struct radeon_surf *surf,
2728 uint64_t tiling_flags, enum radeon_surf_mode *mode)
2729 {
2730 bool scanout;
2731
2732 if (info->gfx_level >= GFX9) {
2733 surf->u.gfx9.swizzle_mode = AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
2734 surf->u.gfx9.color.dcc.independent_64B_blocks =
2735 AMDGPU_TILING_GET(tiling_flags, DCC_INDEPENDENT_64B);
2736 surf->u.gfx9.color.dcc.independent_128B_blocks =
2737 AMDGPU_TILING_GET(tiling_flags, DCC_INDEPENDENT_128B);
2738 surf->u.gfx9.color.dcc.max_compressed_block_size =
2739 AMDGPU_TILING_GET(tiling_flags, DCC_MAX_COMPRESSED_BLOCK_SIZE);
2740 surf->u.gfx9.color.display_dcc_pitch_max = AMDGPU_TILING_GET(tiling_flags, DCC_PITCH_MAX);
2741 scanout = AMDGPU_TILING_GET(tiling_flags, SCANOUT);
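/* ADDR_SW_LINEAR is 0, so any non-zero swizzle mode implies a tiled (2D) layout. */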
2742 *mode =
2743 surf->u.gfx9.swizzle_mode > 0 ? RADEON_SURF_MODE_2D : RADEON_SURF_MODE_LINEAR_ALIGNED;
2744 } else {
2745 surf->u.legacy.pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
2746 surf->u.legacy.bankw = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
2747 surf->u.legacy.bankh = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
2748 surf->u.legacy.tile_split = eg_tile_split(AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT));
2749 surf->u.legacy.mtilea = 1 << AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
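/* NUM_BANKS is stored as log2(banks) - 1, i.e. 0..3 encodes 2..16 banks. */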
2750 surf->u.legacy.num_banks = 2 << AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
2751 scanout = AMDGPU_TILING_GET(tiling_flags, MICRO_TILE_MODE) == 0; /* DISPLAY */
2752
2753 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 4) /* 2D_TILED_THIN1 */
2754 *mode = RADEON_SURF_MODE_2D;
2755 else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 2) /* 1D_TILED_THIN1 */
2756 *mode = RADEON_SURF_MODE_1D;
2757 else
2758 *mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
2759 }
2760
2761 if (scanout)
2762 surf->flags |= RADEON_SURF_SCANOUT;
2763 else
2764 surf->flags &= ~RADEON_SURF_SCANOUT;
2765 }
2766
2767 void ac_surface_compute_bo_metadata(const struct radeon_info *info, struct radeon_surf *surf,
2768 uint64_t *tiling_flags)
2769 {
2770 *tiling_flags = 0;
2771
2772 if (info->gfx_level >= GFX9) {
2773 uint64_t dcc_offset = 0;
2774
2775 if (surf->meta_offset) {
2776 dcc_offset = surf->display_dcc_offset ? surf->display_dcc_offset : surf->meta_offset;
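/* The tiling flags store the DCC offset in 256-byte units in the 24-bit
 * DCC_OFFSET_256B field, so it must fit and be non-zero.
 */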
2777 assert((dcc_offset >> 8) != 0 && (dcc_offset >> 8) < (1 << 24));
2778 }
2779
2780 *tiling_flags |= AMDGPU_TILING_SET(SWIZZLE_MODE, surf->u.gfx9.swizzle_mode);
2781 *tiling_flags |= AMDGPU_TILING_SET(DCC_OFFSET_256B, dcc_offset >> 8);
2782 *tiling_flags |= AMDGPU_TILING_SET(DCC_PITCH_MAX, surf->u.gfx9.color.display_dcc_pitch_max);
2783 *tiling_flags |=
2784 AMDGPU_TILING_SET(DCC_INDEPENDENT_64B, surf->u.gfx9.color.dcc.independent_64B_blocks);
2785 *tiling_flags |=
2786 AMDGPU_TILING_SET(DCC_INDEPENDENT_128B, surf->u.gfx9.color.dcc.independent_128B_blocks);
2787 *tiling_flags |= AMDGPU_TILING_SET(DCC_MAX_COMPRESSED_BLOCK_SIZE,
2788 surf->u.gfx9.color.dcc.max_compressed_block_size);
2789 *tiling_flags |= AMDGPU_TILING_SET(SCANOUT, (surf->flags & RADEON_SURF_SCANOUT) != 0);
2790 } else {
2791 if (surf->u.legacy.level[0].mode >= RADEON_SURF_MODE_2D)
2792 *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4); /* 2D_TILED_THIN1 */
2793 else if (surf->u.legacy.level[0].mode >= RADEON_SURF_MODE_1D)
2794 *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2); /* 1D_TILED_THIN1 */
2795 else
2796 *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1); /* LINEAR_ALIGNED */
2797
2798 *tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG, surf->u.legacy.pipe_config);
2799 *tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH, util_logbase2(surf->u.legacy.bankw));
2800 *tiling_flags |= AMDGPU_TILING_SET(BANK_HEIGHT, util_logbase2(surf->u.legacy.bankh));
2801 if (surf->u.legacy.tile_split)
2802 *tiling_flags |=
2803 AMDGPU_TILING_SET(TILE_SPLIT, eg_tile_split_rev(surf->u.legacy.tile_split));
2804 *tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT, util_logbase2(surf->u.legacy.mtilea));
2805 *tiling_flags |= AMDGPU_TILING_SET(NUM_BANKS, util_logbase2(surf->u.legacy.num_banks) - 1);
2806
2807 if (surf->flags & RADEON_SURF_SCANOUT)
2808 *tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 0); /* DISPLAY_MICRO_TILING */
2809 else
2810 *tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 1); /* THIN_MICRO_TILING */
2811 }
2812 }
2813
2814 static uint32_t ac_get_umd_metadata_word1(const struct radeon_info *info)
2815 {
2816 return (ATI_VENDOR_ID << 16) | info->pci_id;
2817 }
2818
2819 /* This should be called after ac_compute_surface. */
2820 bool ac_surface_apply_umd_metadata(const struct radeon_info *info, struct radeon_surf *surf,
2821 unsigned num_storage_samples, unsigned num_mipmap_levels,
2822 unsigned size_metadata, const uint32_t metadata[64])
2823 {
2824 const uint32_t *desc = &metadata[2];
2825 uint64_t offset;
2826
2827 if (surf->modifier != DRM_FORMAT_MOD_INVALID)
2828 return true;
2829
2830 if (info->gfx_level >= GFX9)
2831 offset = surf->u.gfx9.surf_offset;
2832 else
2833 offset = (uint64_t)surf->u.legacy.level[0].offset_256B * 256;
2834
2835 if (offset || /* Non-zero planes ignore metadata. */
2836 size_metadata < 10 * 4 || /* at least 2(header) + 8(desc) dwords */
2837 metadata[0] == 0 || /* invalid version number (1 and 2 layouts are compatible) */
2838 metadata[1] != ac_get_umd_metadata_word1(info)) /* invalid PCI ID */ {
2839 /* Disable DCC because it might not be enabled. */
2840 ac_surface_zero_dcc_fields(surf);
2841
2842 /* Don't report an error if the texture comes from an incompatible driver,
2843 * but this might not work.
2844 */
2845 return true;
2846 }
2847
2848 /* Validate that sample counts and the number of mipmap levels match. */
2849 unsigned desc_last_level = G_008F1C_LAST_LEVEL(desc[3]);
2850 unsigned type = G_008F1C_TYPE(desc[3]);
2851
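/* For MSAA resources, the descriptor's LAST_LEVEL field holds log2(samples)
 * instead of the last mipmap level, so validate it accordingly.
 */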
2852 if (type == V_008F1C_SQ_RSRC_IMG_2D_MSAA || type == V_008F1C_SQ_RSRC_IMG_2D_MSAA_ARRAY) {
2853 unsigned log_samples = util_logbase2(MAX2(1, num_storage_samples));
2854
2855 if (desc_last_level != log_samples) {
2856 fprintf(stderr,
2857 "amdgpu: invalid MSAA texture import, "
2858 "metadata has log2(samples) = %u, the caller set %u\n",
2859 desc_last_level, log_samples);
2860 return false;
2861 }
2862 } else {
2863 if (desc_last_level != num_mipmap_levels - 1) {
2864 fprintf(stderr,
2865 "amdgpu: invalid mipmapped texture import, "
2866 "metadata has last_level = %u, the caller set %u\n",
2867 desc_last_level, num_mipmap_levels - 1);
2868 return false;
2869 }
2870 }
2871
2872 if (info->gfx_level >= GFX8 && G_008F28_COMPRESSION_EN(desc[6])) {
2873 /* Read DCC information. */
2874 switch (info->gfx_level) {
2875 case GFX8:
2876 surf->meta_offset = (uint64_t)desc[7] << 8;
2877 break;
2878
2879 case GFX9:
2880 surf->meta_offset =
2881 ((uint64_t)desc[7] << 8) | ((uint64_t)G_008F24_META_DATA_ADDRESS(desc[5]) << 40);
2882 surf->u.gfx9.color.dcc.pipe_aligned = G_008F24_META_PIPE_ALIGNED(desc[5]);
2883 surf->u.gfx9.color.dcc.rb_aligned = G_008F24_META_RB_ALIGNED(desc[5]);
2884
2885 /* If DCC is unaligned, this can only be a displayable image. */
2886 if (!surf->u.gfx9.color.dcc.pipe_aligned && !surf->u.gfx9.color.dcc.rb_aligned)
2887 assert(surf->is_displayable);
2888 break;
2889
2890 case GFX10:
2891 case GFX10_3:
2892 case GFX11:
2893 case GFX11_5:
2894 surf->meta_offset =
2895 ((uint64_t)G_00A018_META_DATA_ADDRESS_LO(desc[6]) << 8) | ((uint64_t)desc[7] << 16);
2896 surf->u.gfx9.color.dcc.pipe_aligned = G_00A018_META_PIPE_ALIGNED(desc[6]);
2897 break;
2898
2899 default:
2900 assert(0);
2901 return false;
2902 }
2903 } else {
2904 /* Disable DCC. dcc_offset is always set by texture_from_handle
2905 * and must be cleared here.
2906 */
2907 ac_surface_zero_dcc_fields(surf);
2908 }
2909
2910 return true;
2911 }
2912
2913 void ac_surface_compute_umd_metadata(const struct radeon_info *info, struct radeon_surf *surf,
2914 unsigned num_mipmap_levels, uint32_t desc[8],
2915 unsigned *size_metadata, uint32_t metadata[64],
2916 bool include_tool_md)
2917 {
2918 /* Clear the base address and set the relative DCC offset. */
2919 desc[0] = 0;
2920 desc[1] &= C_008F14_BASE_ADDRESS_HI;
2921
2922 switch (info->gfx_level) {
2923 case GFX6:
2924 case GFX7:
2925 break;
2926 case GFX8:
2927 desc[7] = surf->meta_offset >> 8;
2928 break;
2929 case GFX9:
2930 desc[7] = surf->meta_offset >> 8;
2931 desc[5] &= C_008F24_META_DATA_ADDRESS;
2932 desc[5] |= S_008F24_META_DATA_ADDRESS(surf->meta_offset >> 40);
2933 break;
2934 case GFX10:
2935 case GFX10_3:
2936 case GFX11:
2937 case GFX11_5:
2938 desc[6] &= C_00A018_META_DATA_ADDRESS_LO;
2939 desc[6] |= S_00A018_META_DATA_ADDRESS_LO(surf->meta_offset >> 8);
2940 desc[7] = surf->meta_offset >> 16;
2941 break;
2942 default:
2943 assert(0);
2944 }
2945
2946    /* Metadata image format, versions 1 and 2. Version 2 uses the same layout as
2947 * version 1 with some additional fields (used if include_tool_md=true).
2948 * [0] = metadata_format_identifier
2949 * [1] = (VENDOR_ID << 16) | PCI_ID
2950 * [2:9] = image descriptor for the whole resource
2951 * [2] is always 0, because the base address is cleared
2952 * [9] is the DCC offset bits [39:8] from the beginning of
2953 * the buffer
2954     *        gfx8-: [10:10+LAST_LEVEL] = mipmap level offset bits [39:8] for each level
2955 * ---- The data below is only set in version=2.
2956 * It shouldn't be used by the driver as it's only present to help
2957 * tools (eg: umr) that would want to access this buffer.
2958 * gfx9+ if valid modifier: [10:11] = modifier
2959 * [12:12+3*nplane] = [offset, stride]
2960 * else: [10]: stride
2961 */
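   /* Worked example (illustrative, not taken from any particular driver): a version-2
    * blob describing a 2-plane DCC image with a valid modifier uses dwords [0..1] for
    * the header, [2..9] for the descriptor, [10..11] for the modifier, [12] = nplanes = 2,
    * and [13..16] for the two {offset, stride} pairs, so *size_metadata ends up as 17 * 4 bytes.
    */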
2962 metadata[0] = include_tool_md ? 2 : 1; /* metadata image format version */
2963
2964 /* Tiling modes are ambiguous without a PCI ID. */
2965 metadata[1] = ac_get_umd_metadata_word1(info);
2966
2967 /* Dwords [2:9] contain the image descriptor. */
2968 memcpy(&metadata[2], desc, 8 * 4);
2969 *size_metadata = 10 * 4;
2970
2971 /* Dwords [10:..] contain the mipmap level offsets. */
2972 if (info->gfx_level <= GFX8) {
2973 for (unsigned i = 0; i < num_mipmap_levels; i++)
2974 metadata[10 + i] = surf->u.legacy.level[i].offset_256B;
2975
2976 *size_metadata += num_mipmap_levels * 4;
2977 } else if (include_tool_md) {
2978 if (surf->modifier != DRM_FORMAT_MOD_INVALID) {
2979 /* Modifier */
2980 metadata[10] = surf->modifier;
2981 metadata[11] = surf->modifier >> 32;
2982 /* Num planes */
2983 int nplanes = ac_surface_get_nplanes(surf);
2984 metadata[12] = nplanes;
2985 int ndw = 13;
2986 for (int i = 0; i < nplanes; i++) {
2987 metadata[ndw++] = ac_surface_get_plane_offset(info->gfx_level,
2988 surf, i, 0);
2989 metadata[ndw++] = ac_surface_get_plane_stride(info->gfx_level,
2990 surf, i, 0);
2991 }
2992 *size_metadata = ndw * 4;
2993 } else {
2994 metadata[10] = ac_surface_get_plane_stride(info->gfx_level,
2995 surf, 0, 0);
2996 *size_metadata = 11 * 4;
2997 }
2998 }
2999 }
3000
3001 static uint32_t ac_surface_get_pitch_align(const struct radeon_info *info,
3002 const struct radeon_surf *surf)
3003 {
3004 if (surf->is_linear) {
3005 if (info->gfx_level >= GFX9)
3006 return 256 / surf->bpe;
3007 else
3008 return MAX2(8, 64 / surf->bpe);
3009 }
3010
3011 if (info->gfx_level >= GFX9) {
3012 if (surf->u.gfx9.resource_type == RADEON_RESOURCE_3D)
3013 return 1u << 31; /* reject 3D textures by returning an impossible alignment */
3014
3015 unsigned bpe_log2 = util_logbase2(surf->bpe);
3016 unsigned block_size_log2;
3017
3018       switch ((surf->u.gfx9.swizzle_mode & ~3) + 3) { /* map any swizzle variant to its _R form */
3019 case ADDR_SW_256B_R:
3020 block_size_log2 = 8;
3021 break;
3022 case ADDR_SW_4KB_R:
3023 case ADDR_SW_4KB_R_X:
3024 block_size_log2 = 12;
3025 break;
3026 case ADDR_SW_64KB_R:
3027 case ADDR_SW_64KB_R_T:
3028 case ADDR_SW_64KB_R_X:
3029 block_size_log2 = 16;
3030 break;
3031 case ADDR_SW_256KB_R_X:
3032 block_size_log2 = 18;
3033 break;
3034 default:
3035 unreachable("unhandled swizzle mode");
3036 }
3037
3038 if (info->gfx_level >= GFX10) {
3039 return 1 << (((block_size_log2 - bpe_log2) + 1) / 2);
3040 } else {
3041          static const unsigned block_256B_width[] = {16, 16, 8, 8, 4}; /* 256B block width in elements, indexed by bpe_log2 */
3042 return block_256B_width[bpe_log2] << ((block_size_log2 - 8) / 2);
3043 }
3044 } else {
3045 unsigned mode;
3046
3047 if ((surf->flags & RADEON_SURF_Z_OR_SBUFFER) == RADEON_SURF_SBUFFER)
3048 mode = surf->u.legacy.zs.stencil_level[0].mode;
3049 else
3050 mode = surf->u.legacy.level[0].mode;
3051
3052 /* Note that display usage requires an alignment of 32 pixels (see AdjustPitchAlignment),
3053 * which is not checked here.
3054 */
3055 switch (mode) {
3056 case RADEON_SURF_MODE_1D:
3057 return 8;
3058 case RADEON_SURF_MODE_2D:
3059 return 8 * surf->u.legacy.bankw * surf->u.legacy.mtilea *
3060 ac_pipe_config_to_num_pipes(surf->u.legacy.pipe_config);
3061 default:
3062 unreachable("unhandled surf mode");
3063 }
3064 }
3065 }
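/* Worked example for ac_surface_get_pitch_align (illustrative, not from the original
 * source): with bpe=4 (bpe_log2=2) and a 64KB_R swizzle mode (block_size_log2=16),
 * GFX10+ returns 1 << ((16 - 2 + 1) / 2) = 128 elements, and GFX9 returns
 * block_256B_width[2] << ((16 - 8) / 2) = 8 << 4 = 128 elements as well.
 */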
3066
3067 bool ac_surface_override_offset_stride(const struct radeon_info *info, struct radeon_surf *surf,
3068 unsigned num_layers, unsigned num_mipmap_levels,
3069 uint64_t offset, unsigned pitch)
3070 {
3071 if ((ac_surface_get_pitch_align(info, surf) - 1) & pitch)
3072 return false;
3073
3074    /* Require the pitch to match when the surface has metadata (DCC), multiple layers or
3075     * mipmap levels, a non-linear layout (this could be relaxed), or on GFX10, which is
3076     * the only generation that can't override the pitch.
3077     */
3078 bool require_equal_pitch = surf->surf_size != surf->total_size ||
3079 num_layers != 1 ||
3080 num_mipmap_levels != 1 ||
3081 (info->gfx_level >= GFX9 && !surf->is_linear) ||
3082 info->gfx_level == GFX10;
3083
3084 if (info->gfx_level >= GFX9) {
3085 if (pitch) {
3086 if (surf->u.gfx9.surf_pitch != pitch && require_equal_pitch)
3087 return false;
3088
3089 if (pitch != surf->u.gfx9.surf_pitch) {
3090 unsigned slices = surf->surf_size / surf->u.gfx9.surf_slice_size;
3091
3092 surf->u.gfx9.uses_custom_pitch = true;
3093 surf->u.gfx9.surf_pitch = pitch;
3094 surf->u.gfx9.epitch = pitch - 1;
3095 surf->u.gfx9.pitch[0] = pitch;
3096 surf->u.gfx9.surf_slice_size = (uint64_t)pitch * surf->u.gfx9.surf_height * surf->bpe;
3097 surf->total_size = surf->surf_size = surf->u.gfx9.surf_slice_size * slices;
3098 }
3099 }
3100
3101 surf->u.gfx9.surf_offset = offset;
3102 if (surf->has_stencil)
3103 surf->u.gfx9.zs.stencil_offset += offset;
3104 } else {
3105 if (pitch) {
3106 if (surf->u.legacy.level[0].nblk_x != pitch && require_equal_pitch)
3107 return false;
3108
3109 surf->u.legacy.level[0].nblk_x = pitch;
3110 surf->u.legacy.level[0].slice_size_dw =
3111 ((uint64_t)pitch * surf->u.legacy.level[0].nblk_y * surf->bpe) / 4;
3112 }
3113
3114 if (offset) {
3115 for (unsigned i = 0; i < ARRAY_SIZE(surf->u.legacy.level); ++i)
3116 surf->u.legacy.level[i].offset_256B += offset / 256;
3117 }
3118 }
3119
3120 if (offset & ((1 << surf->alignment_log2) - 1) ||
3121 offset >= UINT64_MAX - surf->total_size)
3122 return false;
3123
3124 if (surf->meta_offset)
3125 surf->meta_offset += offset;
3126 if (surf->fmask_offset)
3127 surf->fmask_offset += offset;
3128 if (surf->cmask_offset)
3129 surf->cmask_offset += offset;
3130 if (surf->display_dcc_offset)
3131 surf->display_dcc_offset += offset;
3132 return true;
3133 }
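/* Usage sketch (illustrative; "bo_offset" and "pitch_in_elements" are hypothetical
 * values supplied by an importer, not defined in this file):
 *
 *    if (!ac_surface_override_offset_stride(info, surf, 1, 1,
 *                                           bo_offset, pitch_in_elements))
 *       return false;   // the imported pitch/offset doesn't fit this layout
 */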
3134
3135 unsigned ac_surface_get_nplanes(const struct radeon_surf *surf)
3136 {
3137 if (surf->modifier == DRM_FORMAT_MOD_INVALID)
3138 return 1;
3139 else if (surf->display_dcc_offset)
3140 return 3;
3141 else if (surf->meta_offset)
3142 return 2;
3143 else
3144 return 1;
3145 }
3146
3147 uint64_t ac_surface_get_plane_offset(enum amd_gfx_level gfx_level,
3148 const struct radeon_surf *surf,
3149 unsigned plane, unsigned layer)
3150 {
3151 switch (plane) {
3152 case 0:
3153 if (gfx_level >= GFX9) {
3154 return surf->u.gfx9.surf_offset +
3155 layer * surf->u.gfx9.surf_slice_size;
3156 } else {
3157 return (uint64_t)surf->u.legacy.level[0].offset_256B * 256 +
3158 layer * (uint64_t)surf->u.legacy.level[0].slice_size_dw * 4;
3159 }
3160 case 1:
3161 assert(!layer);
3162 return surf->display_dcc_offset ?
3163 surf->display_dcc_offset : surf->meta_offset;
3164 case 2:
3165 assert(!layer);
3166 return surf->meta_offset;
3167 default:
3168 unreachable("Invalid plane index");
3169 }
3170 }
3171
3172 uint64_t ac_surface_get_plane_stride(enum amd_gfx_level gfx_level,
3173 const struct radeon_surf *surf,
3174 unsigned plane, unsigned level)
3175 {
3176 switch (plane) {
3177 case 0:
3178 if (gfx_level >= GFX9) {
3179 return (surf->is_linear ? surf->u.gfx9.pitch[level] : surf->u.gfx9.surf_pitch) * surf->bpe;
3180 } else {
3181 return surf->u.legacy.level[level].nblk_x * surf->bpe;
3182 }
3183 case 1:
3184 return 1 + (surf->display_dcc_offset ?
3185 surf->u.gfx9.color.display_dcc_pitch_max : surf->u.gfx9.color.dcc_pitch_max);
3186 case 2:
3187 return surf->u.gfx9.color.dcc_pitch_max + 1;
3188 default:
3189 unreachable("Invalid plane index");
3190 }
3191 }
3192
3193 uint64_t ac_surface_get_plane_size(const struct radeon_surf *surf,
3194 unsigned plane)
3195 {
3196 switch (plane) {
3197 case 0:
3198 return surf->surf_size;
3199 case 1:
3200 return surf->display_dcc_offset ?
3201 surf->u.gfx9.color.display_dcc_size : surf->meta_size;
3202 case 2:
3203 return surf->meta_size;
3204 default:
3205 unreachable("Invalid plane index");
3206 }
3207 }
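/* Illustrative sketch of how an exporter might describe all planes of a surface
 * ("layouts" is a hypothetical caller-side array, not part of this file):
 *
 *    unsigned nplanes = ac_surface_get_nplanes(surf);
 *    for (unsigned p = 0; p < nplanes; p++) {
 *       layouts[p].offset = ac_surface_get_plane_offset(info->gfx_level, surf, p, 0);
 *       layouts[p].stride = ac_surface_get_plane_stride(info->gfx_level, surf, p, 0);
 *       layouts[p].size   = ac_surface_get_plane_size(surf, p);
 *    }
 */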
3208
3209 uint64_t
3210 ac_surface_addr_from_coord(struct ac_addrlib *addrlib, const struct radeon_info *info,
3211 const struct radeon_surf *surf, const struct ac_surf_info *surf_info,
3212 unsigned level, unsigned x, unsigned y, unsigned layer, bool is_3d)
3213 {
3214 /* Only implemented for GFX9+ */
3215 assert(info->gfx_level >= GFX9);
3216
3217 ADDR2_COMPUTE_SURFACE_ADDRFROMCOORD_INPUT input = {0};
3218 input.size = sizeof(ADDR2_COMPUTE_SURFACE_ADDRFROMCOORD_INPUT);
3219 input.slice = layer;
3220 input.mipId = level;
3221 input.unalignedWidth = DIV_ROUND_UP(surf_info->width, surf->blk_w);
3222 input.unalignedHeight = DIV_ROUND_UP(surf_info->height, surf->blk_h);
3223 input.numSlices = is_3d ? surf_info->depth : surf_info->array_size;
3224 input.numMipLevels = surf_info->levels;
3225 input.numSamples = surf_info->samples;
3226 input.numFrags = surf_info->samples;
3227 input.swizzleMode = surf->u.gfx9.swizzle_mode;
3228 input.resourceType = (AddrResourceType)surf->u.gfx9.resource_type;
3229 input.pipeBankXor = surf->tile_swizzle;
3230 input.bpp = surf->bpe * 8;
3231 input.x = x;
3232 input.y = y;
3233
3234 ADDR2_COMPUTE_SURFACE_ADDRFROMCOORD_OUTPUT output = {0};
3235 output.size = sizeof(ADDR2_COMPUTE_SURFACE_ADDRFROMCOORD_OUTPUT);
3236 Addr2ComputeSurfaceAddrFromCoord(addrlib->handle, &input, &output);
3237 return output.addr;
3238 }
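/* Usage sketch (illustrative): the byte offset of texel (x, y) in layer 0 of mip
 * level 0 of a 2D GFX9+ image could be queried as
 *
 *    uint64_t addr = ac_surface_addr_from_coord(addrlib, info, surf, &surf_info,
 *                                               0, x, y, 0, false);
 *
 * where surf_info describes the same dimensions the surface was computed with.
 */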
3239
3240 void
3241 ac_surface_compute_nbc_view(struct ac_addrlib *addrlib, const struct radeon_info *info,
3242 const struct radeon_surf *surf, const struct ac_surf_info *surf_info,
3243 unsigned level, unsigned layer, struct ac_surf_nbc_view *out)
3244 {
3245 /* Only implemented for GFX10+ */
3246 assert(info->gfx_level >= GFX10);
3247
3248 ADDR2_COMPUTE_NONBLOCKCOMPRESSEDVIEW_INPUT input = {0};
3249 input.size = sizeof(ADDR2_COMPUTE_NONBLOCKCOMPRESSEDVIEW_INPUT);
3250 input.swizzleMode = surf->u.gfx9.swizzle_mode;
3251 input.resourceType = (AddrResourceType)surf->u.gfx9.resource_type;
3252 switch (surf->bpe) {
3253 case 8:
3254 input.format = ADDR_FMT_BC1;
3255 break;
3256 case 16:
3257 input.format = ADDR_FMT_BC3;
3258 break;
3259 default:
3260 assert(0);
3261 }
3262 input.width = surf_info->width;
3263 input.height = surf_info->height;
3264 input.numSlices = surf_info->array_size;
3265 input.numMipLevels = surf_info->levels;
3266 input.pipeBankXor = surf->tile_swizzle;
3267 input.slice = layer;
3268 input.mipId = level;
3269
3270 ADDR_E_RETURNCODE res;
3271 ADDR2_COMPUTE_NONBLOCKCOMPRESSEDVIEW_OUTPUT output = {0};
3272 output.size = sizeof(ADDR2_COMPUTE_NONBLOCKCOMPRESSEDVIEW_OUTPUT);
3273 res = Addr2ComputeNonBlockCompressedView(addrlib->handle, &input, &output);
3274 if (res == ADDR_OK) {
3275 out->base_address_offset = output.offset;
3276 out->tile_swizzle = output.pipeBankXor;
3277 out->width = output.unalignedWidth;
3278 out->height = output.unalignedHeight;
3279 out->num_levels = output.numMipLevels;
3280 out->level = output.mipId;
3281 out->valid = true;
3282 } else {
3283 out->valid = false;
3284 }
3285 }
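/* Usage sketch (illustrative; "va" is a hypothetical caller-side base address):
 * callers are expected to check out->valid before consuming the view, e.g.
 *
 *    struct ac_surf_nbc_view view;
 *    ac_surface_compute_nbc_view(addrlib, info, surf, &surf_info, level, layer, &view);
 *    if (view.valid) {
 *       va += view.base_address_offset;
 *       // use view.width/height/tile_swizzle/level for the non-block-compressed view
 *    }
 */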
3286
3287 void ac_surface_print_info(FILE *out, const struct radeon_info *info,
3288 const struct radeon_surf *surf)
3289 {
3290 if (info->gfx_level >= GFX9) {
3291 fprintf(out,
3292 " Surf: size=%" PRIu64 ", slice_size=%" PRIu64 ", "
3293 "alignment=%u, swmode=%u, tile_swizzle=%u, epitch=%u, pitch=%u, blk_w=%u, "
3294 "blk_h=%u, bpe=%u, flags=0x%"PRIx64"\n",
3295 surf->surf_size, surf->u.gfx9.surf_slice_size,
3296 1 << surf->surf_alignment_log2, surf->u.gfx9.swizzle_mode, surf->tile_swizzle,
3297 surf->u.gfx9.epitch, surf->u.gfx9.surf_pitch,
3298 surf->blk_w, surf->blk_h, surf->bpe, surf->flags);
3299
3300 if (surf->fmask_offset)
3301 fprintf(out,
3302 " FMask: offset=%" PRIu64 ", size=%" PRIu64 ", "
3303 "alignment=%u, swmode=%u, epitch=%u\n",
3304 surf->fmask_offset, surf->fmask_size,
3305 1 << surf->fmask_alignment_log2, surf->u.gfx9.color.fmask_swizzle_mode,
3306 surf->u.gfx9.color.fmask_epitch);
3307
3308 if (surf->cmask_offset)
3309 fprintf(out,
3310 " CMask: offset=%" PRIu64 ", size=%u, "
3311 "alignment=%u\n",
3312 surf->cmask_offset, surf->cmask_size,
3313 1 << surf->cmask_alignment_log2);
3314
3315 if (surf->flags & RADEON_SURF_Z_OR_SBUFFER && surf->meta_offset)
3316 fprintf(out,
3317 " HTile: offset=%" PRIu64 ", size=%u, alignment=%u\n",
3318 surf->meta_offset, surf->meta_size,
3319 1 << surf->meta_alignment_log2);
3320
3321 if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER) && surf->meta_offset)
3322 fprintf(out,
3323 " DCC: offset=%" PRIu64 ", size=%u, "
3324 "alignment=%u, pitch_max=%u, num_dcc_levels=%u\n",
3325 surf->meta_offset, surf->meta_size, 1 << surf->meta_alignment_log2,
3326 surf->u.gfx9.color.display_dcc_pitch_max, surf->num_meta_levels);
3327
3328 if (surf->has_stencil)
3329 fprintf(out,
3330 " Stencil: offset=%" PRIu64 ", swmode=%u, epitch=%u\n",
3331 surf->u.gfx9.zs.stencil_offset,
3332 surf->u.gfx9.zs.stencil_swizzle_mode,
3333 surf->u.gfx9.zs.stencil_epitch);
3334 } else {
3335 fprintf(out,
3336 " Surf: size=%" PRIu64 ", alignment=%u, blk_w=%u, blk_h=%u, "
3337 "bpe=%u, flags=0x%"PRIx64"\n",
3338 surf->surf_size, 1 << surf->surf_alignment_log2, surf->blk_w,
3339 surf->blk_h, surf->bpe, surf->flags);
3340
3341 fprintf(out,
3342 " Layout: size=%" PRIu64 ", alignment=%u, bankw=%u, bankh=%u, "
3343 "nbanks=%u, mtilea=%u, tilesplit=%u, pipeconfig=%u, scanout=%u\n",
3344 surf->surf_size, 1 << surf->surf_alignment_log2,
3345 surf->u.legacy.bankw, surf->u.legacy.bankh,
3346 surf->u.legacy.num_banks, surf->u.legacy.mtilea,
3347 surf->u.legacy.tile_split, surf->u.legacy.pipe_config,
3348 (surf->flags & RADEON_SURF_SCANOUT) != 0);
3349
3350 if (surf->fmask_offset)
3351 fprintf(out,
3352 " FMask: offset=%" PRIu64 ", size=%" PRIu64 ", "
3353 "alignment=%u, pitch_in_pixels=%u, bankh=%u, "
3354 "slice_tile_max=%u, tile_mode_index=%u\n",
3355 surf->fmask_offset, surf->fmask_size,
3356 1 << surf->fmask_alignment_log2, surf->u.legacy.color.fmask.pitch_in_pixels,
3357 surf->u.legacy.color.fmask.bankh,
3358 surf->u.legacy.color.fmask.slice_tile_max,
3359 surf->u.legacy.color.fmask.tiling_index);
3360
3361 if (surf->cmask_offset)
3362 fprintf(out,
3363 " CMask: offset=%" PRIu64 ", size=%u, alignment=%u, "
3364 "slice_tile_max=%u\n",
3365 surf->cmask_offset, surf->cmask_size,
3366 1 << surf->cmask_alignment_log2, surf->u.legacy.color.cmask_slice_tile_max);
3367
3368 if (surf->flags & RADEON_SURF_Z_OR_SBUFFER && surf->meta_offset)
3369 fprintf(out, " HTile: offset=%" PRIu64 ", size=%u, alignment=%u\n",
3370 surf->meta_offset, surf->meta_size,
3371 1 << surf->meta_alignment_log2);
3372
3373 if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER) && surf->meta_offset)
3374 fprintf(out, " DCC: offset=%" PRIu64 ", size=%u, alignment=%u\n",
3375 surf->meta_offset, surf->meta_size, 1 << surf->meta_alignment_log2);
3376
3377 if (surf->has_stencil)
3378 fprintf(out, " StencilLayout: tilesplit=%u\n",
3379 surf->u.legacy.stencil_tile_split);
3380 }
3381 }
3382
3383 static nir_def *gfx10_nir_meta_addr_from_coord(nir_builder *b, const struct radeon_info *info,
3384 struct gfx9_meta_equation *equation,
3385 int blkSizeBias, unsigned blkStart,
3386 nir_def *meta_pitch, nir_def *meta_slice_size,
3387 nir_def *x, nir_def *y, nir_def *z,
3388 nir_def *pipe_xor,
3389 nir_def **bit_position)
3390 {
3391 nir_def *zero = nir_imm_int(b, 0);
3392 nir_def *one = nir_imm_int(b, 1);
3393
3394 assert(info->gfx_level >= GFX10);
3395
3396 unsigned meta_block_width_log2 = util_logbase2(equation->meta_block_width);
3397 unsigned meta_block_height_log2 = util_logbase2(equation->meta_block_height);
3398 unsigned blkSizeLog2 = meta_block_width_log2 + meta_block_height_log2 + blkSizeBias;
3399
3400    nir_def *coord[] = {x, y, z, NULL};
3401 nir_def *address = zero;
3402
3403 for (unsigned i = blkStart; i < blkSizeLog2 + 1; i++) {
3404 nir_def *v = zero;
3405
3406 for (unsigned c = 0; c < 4; c++) {
3407 unsigned index = i * 4 + c - (blkStart * 4);
3408 if (equation->u.gfx10_bits[index]) {
3409 unsigned mask = equation->u.gfx10_bits[index];
3410 nir_def *bits = coord[c];
3411
3412 while (mask)
3413 v = nir_ixor(b, v, nir_iand(b, nir_ushr_imm(b, bits, u_bit_scan(&mask)), one));
3414 }
3415 }
3416
3417 address = nir_ior(b, address, nir_ishl_imm(b, v, i));
3418 }
3419
3420 unsigned blkMask = (1 << blkSizeLog2) - 1;
3421 unsigned pipeMask = (1 << G_0098F8_NUM_PIPES(info->gb_addr_config)) - 1;
3422 unsigned m_pipeInterleaveLog2 = 8 + G_0098F8_PIPE_INTERLEAVE_SIZE_GFX9(info->gb_addr_config);
3423 nir_def *xb = nir_ushr_imm(b, x, meta_block_width_log2);
3424 nir_def *yb = nir_ushr_imm(b, y, meta_block_height_log2);
3425 nir_def *pb = nir_ushr_imm(b, meta_pitch, meta_block_width_log2);
3426 nir_def *blkIndex = nir_iadd(b, nir_imul(b, yb, pb), xb);
3427 nir_def *pipeXor = nir_iand_imm(b, nir_ishl_imm(b, nir_iand_imm(b, pipe_xor, pipeMask),
3428 m_pipeInterleaveLog2), blkMask);
3429
3430 if (bit_position)
3431 *bit_position = nir_ishl_imm(b, nir_iand_imm(b, address, 1), 2);
3432
3433 return nir_iadd(b, nir_iadd(b, nir_imul(b, meta_slice_size, z),
3434 nir_imul(b, blkIndex, nir_ishl_imm(b, one, blkSizeLog2))),
3435 nir_ixor(b, nir_ushr(b, address, one), pipeXor));
3436 }
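/* Note (added for clarity): the value returned above is a byte offset into the
 * metadata surface. For 4-bit metadata such as CMASK, the low bit of the element
 * address selects the nibble within that byte, which is why bit_position is
 * (address & 1) << 2 and the byte offset uses address >> 1.
 */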
3437
3438 static nir_def *gfx9_nir_meta_addr_from_coord(nir_builder *b, const struct radeon_info *info,
3439 struct gfx9_meta_equation *equation,
3440 nir_def *meta_pitch, nir_def *meta_height,
3441 nir_def *x, nir_def *y, nir_def *z,
3442 nir_def *sample, nir_def *pipe_xor,
3443 nir_def **bit_position)
3444 {
3445 nir_def *zero = nir_imm_int(b, 0);
3446 nir_def *one = nir_imm_int(b, 1);
3447
3448 assert(info->gfx_level >= GFX9);
3449
3450 unsigned meta_block_width_log2 = util_logbase2(equation->meta_block_width);
3451 unsigned meta_block_height_log2 = util_logbase2(equation->meta_block_height);
3452 unsigned meta_block_depth_log2 = util_logbase2(equation->meta_block_depth);
3453
3454 unsigned m_pipeInterleaveLog2 = 8 + G_0098F8_PIPE_INTERLEAVE_SIZE_GFX9(info->gb_addr_config);
3455 unsigned numPipeBits = equation->u.gfx9.num_pipe_bits;
3456 nir_def *pitchInBlock = nir_ushr_imm(b, meta_pitch, meta_block_width_log2);
3457 nir_def *sliceSizeInBlock = nir_imul(b, nir_ushr_imm(b, meta_height, meta_block_height_log2),
3458 pitchInBlock);
3459
3460 nir_def *xb = nir_ushr_imm(b, x, meta_block_width_log2);
3461 nir_def *yb = nir_ushr_imm(b, y, meta_block_height_log2);
3462 nir_def *zb = nir_ushr_imm(b, z, meta_block_depth_log2);
3463
3464 nir_def *blockIndex = nir_iadd(b, nir_iadd(b, nir_imul(b, zb, sliceSizeInBlock),
3465 nir_imul(b, yb, pitchInBlock)), xb);
3466 nir_def *coords[] = {x, y, z, sample, blockIndex};
3467
3468 nir_def *address = zero;
3469 unsigned num_bits = equation->u.gfx9.num_bits;
3470 assert(num_bits <= 32);
3471
3472 /* Compute the address up until the last bit that doesn't use the block index. */
3473 for (unsigned i = 0; i < num_bits - 1; i++) {
3474 nir_def *xor = zero;
3475
3476 for (unsigned c = 0; c < 5; c++) {
3477 if (equation->u.gfx9.bit[i].coord[c].dim >= 5)
3478 continue;
3479
3480 assert(equation->u.gfx9.bit[i].coord[c].ord < 32);
3481 nir_def *ison =
3482 nir_iand(b, nir_ushr_imm(b, coords[equation->u.gfx9.bit[i].coord[c].dim],
3483 equation->u.gfx9.bit[i].coord[c].ord), one);
3484
3485 xor = nir_ixor(b, xor, ison);
3486 }
3487 address = nir_ior(b, address, nir_ishl_imm(b, xor, i));
3488 }
3489
3490 /* Fill the remaining bits with the block index. */
3491 unsigned last = num_bits - 1;
3492 address = nir_ior(b, address,
3493 nir_ishl_imm(b, nir_ushr_imm(b, blockIndex,
3494 equation->u.gfx9.bit[last].coord[0].ord),
3495 last));
3496
3497 if (bit_position)
3498 *bit_position = nir_ishl_imm(b, nir_iand_imm(b, address, 1), 2);
3499
3500 nir_def *pipeXor = nir_iand_imm(b, pipe_xor, (1 << numPipeBits) - 1);
3501 return nir_ixor(b, nir_ushr(b, address, one),
3502 nir_ishl_imm(b, pipeXor, m_pipeInterleaveLog2));
3503 }
3504
3505 nir_def *ac_nir_dcc_addr_from_coord(nir_builder *b, const struct radeon_info *info,
3506 unsigned bpe, struct gfx9_meta_equation *equation,
3507 nir_def *dcc_pitch, nir_def *dcc_height,
3508 nir_def *dcc_slice_size,
3509 nir_def *x, nir_def *y, nir_def *z,
3510 nir_def *sample, nir_def *pipe_xor)
3511 {
3512 if (info->gfx_level >= GFX10) {
3513 unsigned bpp_log2 = util_logbase2(bpe);
3514
3515 return gfx10_nir_meta_addr_from_coord(b, info, equation, bpp_log2 - 8, 1,
3516 dcc_pitch, dcc_slice_size,
3517 x, y, z, pipe_xor, NULL);
3518 } else {
3519 return gfx9_nir_meta_addr_from_coord(b, info, equation, dcc_pitch,
3520 dcc_height, x, y, z,
3521 sample, pipe_xor, NULL);
3522 }
3523 }
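/* Reasoning note (not from the original source): on GFX10+ each DCC byte covers
 * 256 bytes of color data, so the block-size bias of bpp_log2 - 8 makes
 * blkSizeLog2 = log2(meta_block_width * meta_block_height * bpe / 256), i.e. the
 * log2 of the number of DCC bytes per metadata block.
 */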
3524
3525 nir_def *ac_nir_cmask_addr_from_coord(nir_builder *b, const struct radeon_info *info,
3526 struct gfx9_meta_equation *equation,
3527 nir_def *cmask_pitch, nir_def *cmask_height,
3528 nir_def *cmask_slice_size,
3529 nir_def *x, nir_def *y, nir_def *z,
3530 nir_def *pipe_xor,
3531 nir_def **bit_position)
3532 {
3533 nir_def *zero = nir_imm_int(b, 0);
3534
3535 if (info->gfx_level >= GFX10) {
3536 return gfx10_nir_meta_addr_from_coord(b, info, equation, -7, 1,
3537 cmask_pitch, cmask_slice_size,
3538 x, y, z, pipe_xor, bit_position);
3539 } else {
3540 return gfx9_nir_meta_addr_from_coord(b, info, equation, cmask_pitch,
3541 cmask_height, x, y, z, zero,
3542 pipe_xor, bit_position);
3543 }
3544 }
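/* Reasoning note (not from the original source): CMASK stores 4 bits per 8x8
 * pixel block, i.e. one byte per 128 pixels, which is where the bias of -7 comes
 * from: blkSizeLog2 = log2(meta_block_width * meta_block_height) - 7. The
 * returned bit_position then selects the nibble within that byte.
 */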
3545
3546 nir_def *ac_nir_htile_addr_from_coord(nir_builder *b, const struct radeon_info *info,
3547 struct gfx9_meta_equation *equation,
3548 nir_def *htile_pitch,
3549 nir_def *htile_slice_size,
3550 nir_def *x, nir_def *y, nir_def *z,
3551 nir_def *pipe_xor)
3552 {
3553 return gfx10_nir_meta_addr_from_coord(b, info, equation, -4, 2,
3554 htile_pitch, htile_slice_size,
3555 x, y, z, pipe_xor, NULL);
3556 }
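/* Reasoning note (not from the original source): HTILE stores 32 bits per 8x8
 * pixel tile, i.e. 4 bytes per 64 pixels, hence the bias of -4:
 * blkSizeLog2 = log2(meta_block_width * meta_block_height) - 4.
 */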
3557
3558 unsigned ac_get_cb_number_type(enum pipe_format format)
3559 {
3560 const struct util_format_description *desc = util_format_description(format);
3561 int chan = util_format_get_first_non_void_channel(format);
3562
3563 if (chan == -1 || desc->channel[chan].type == UTIL_FORMAT_TYPE_FLOAT) {
3564 return V_028C70_NUMBER_FLOAT;
3565 } else {
3566 if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB) {
3567 return V_028C70_NUMBER_SRGB;
3568 } else if (desc->channel[chan].type == UTIL_FORMAT_TYPE_SIGNED) {
3569 return desc->channel[chan].pure_integer ? V_028C70_NUMBER_SINT : V_028C70_NUMBER_SNORM;
3570 } else if (desc->channel[chan].type == UTIL_FORMAT_TYPE_UNSIGNED) {
3571 return desc->channel[chan].pure_integer ? V_028C70_NUMBER_UINT : V_028C70_NUMBER_UNORM;
3572 } else {
3573 return V_028C70_NUMBER_UNORM;
3574 }
3575 }
3576 }
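/* Examples (illustrative): PIPE_FORMAT_R8G8B8A8_UNORM maps to V_028C70_NUMBER_UNORM,
 * PIPE_FORMAT_R8G8B8A8_SRGB to V_028C70_NUMBER_SRGB, PIPE_FORMAT_R32G32B32A32_FLOAT
 * to V_028C70_NUMBER_FLOAT, and PIPE_FORMAT_R32_UINT to V_028C70_NUMBER_UINT.
 */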
3577
3578 unsigned ac_get_cb_format(enum amd_gfx_level gfx_level, enum pipe_format format)
3579 {
3580 const struct util_format_description *desc = util_format_description(format);
3581
3582 #define HAS_SIZE(x, y, z, w) \
3583 (desc->channel[0].size == (x) && desc->channel[1].size == (y) && \
3584 desc->channel[2].size == (z) && desc->channel[3].size == (w))
3585
3586 if (format == PIPE_FORMAT_R11G11B10_FLOAT) /* isn't plain */
3587 return V_028C70_COLOR_10_11_11;
3588
3589 if (gfx_level >= GFX10_3 &&
3590 format == PIPE_FORMAT_R9G9B9E5_FLOAT) /* isn't plain */
3591 return V_028C70_COLOR_5_9_9_9;
3592
3593 if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN)
3594 return V_028C70_COLOR_INVALID;
3595
3596 /* hw cannot support mixed formats (except depth/stencil, since
3597 * stencil is not written to). */
3598 if (desc->is_mixed && desc->colorspace != UTIL_FORMAT_COLORSPACE_ZS)
3599 return V_028C70_COLOR_INVALID;
3600
3601 int first_non_void = util_format_get_first_non_void_channel(format);
3602
3603 /* Reject SCALED formats because we don't implement them for CB. */
3604 if (first_non_void >= 0 && first_non_void <= 3 &&
3605 (desc->channel[first_non_void].type == UTIL_FORMAT_TYPE_UNSIGNED ||
3606 desc->channel[first_non_void].type == UTIL_FORMAT_TYPE_SIGNED) &&
3607 !desc->channel[first_non_void].normalized &&
3608 !desc->channel[first_non_void].pure_integer)
3609 return V_028C70_COLOR_INVALID;
3610
3611 switch (desc->nr_channels) {
3612 case 1:
3613 switch (desc->channel[0].size) {
3614 case 8:
3615 return V_028C70_COLOR_8;
3616 case 16:
3617 return V_028C70_COLOR_16;
3618 case 32:
3619 return V_028C70_COLOR_32;
3620 case 64:
3621 return V_028C70_COLOR_32_32;
3622 }
3623 break;
3624 case 2:
3625 if (desc->channel[0].size == desc->channel[1].size) {
3626 switch (desc->channel[0].size) {
3627 case 8:
3628 return V_028C70_COLOR_8_8;
3629 case 16:
3630 return V_028C70_COLOR_16_16;
3631 case 32:
3632 return V_028C70_COLOR_32_32;
3633 }
3634 } else if (HAS_SIZE(8, 24, 0, 0)) {
3635 return V_028C70_COLOR_24_8;
3636 } else if (HAS_SIZE(24, 8, 0, 0)) {
3637 return V_028C70_COLOR_8_24;
3638 }
3639 break;
3640 case 3:
3641 if (HAS_SIZE(5, 6, 5, 0)) {
3642 return V_028C70_COLOR_5_6_5;
3643 } else if (HAS_SIZE(32, 8, 24, 0)) {
3644 return V_028C70_COLOR_X24_8_32_FLOAT;
3645 }
3646 break;
3647 case 4:
3648 if (desc->channel[0].size == desc->channel[1].size &&
3649 desc->channel[0].size == desc->channel[2].size &&
3650 desc->channel[0].size == desc->channel[3].size) {
3651 switch (desc->channel[0].size) {
3652 case 4:
3653 return V_028C70_COLOR_4_4_4_4;
3654 case 8:
3655 return V_028C70_COLOR_8_8_8_8;
3656 case 16:
3657 return V_028C70_COLOR_16_16_16_16;
3658 case 32:
3659 return V_028C70_COLOR_32_32_32_32;
3660 }
3661 } else if (HAS_SIZE(5, 5, 5, 1)) {
3662 return V_028C70_COLOR_1_5_5_5;
3663 } else if (HAS_SIZE(1, 5, 5, 5)) {
3664 return V_028C70_COLOR_5_5_5_1;
3665 } else if (HAS_SIZE(10, 10, 10, 2)) {
3666 return V_028C70_COLOR_2_10_10_10;
3667 } else if (HAS_SIZE(2, 10, 10, 10)) {
3668 return V_028C70_COLOR_10_10_10_2;
3669 }
3670 break;
3671 }
3672 return V_028C70_COLOR_INVALID;
3673 }
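/* Examples (illustrative): PIPE_FORMAT_B8G8R8A8_UNORM maps to V_028C70_COLOR_8_8_8_8,
 * PIPE_FORMAT_R16G16_FLOAT to V_028C70_COLOR_16_16, and PIPE_FORMAT_R10G10B10A2_UNORM
 * (channel sizes 10,10,10,2) to V_028C70_COLOR_2_10_10_10. Only the channel layout is
 * selected here; the component swap is programmed separately.
 */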
3674