1 /*
2 * Copyright © 2011 Red Hat All Rights Reserved.
3 * Copyright © 2017 Advanced Micro Devices, Inc.
4 *
5 * SPDX-License-Identifier: MIT
6 */
7
8 #define AC_SURFACE_INCLUDE_NIR
9 #include "ac_surface.h"
10
11 #include "ac_drm_fourcc.h"
12 #include "ac_gpu_info.h"
13 #include "addrlib/inc/addrinterface.h"
14 #include "addrlib/src/amdgpu_asic_addr.h"
15 #include "amd_family.h"
16 #include "sid.h"
17 #include "util/hash_table.h"
18 #include "util/macros.h"
19 #include "util/simple_mtx.h"
20 #include "util/u_atomic.h"
21 #include "util/format/u_format.h"
22 #include "util/u_math.h"
23 #include "util/u_memory.h"
24
25 #include <errno.h>
26 #include <stdio.h>
27 #include <stdlib.h>
28
29 #ifdef _WIN32
30 #define AMDGPU_TILING_ARRAY_MODE_SHIFT 0
31 #define AMDGPU_TILING_ARRAY_MODE_MASK 0xf
32 #define AMDGPU_TILING_PIPE_CONFIG_SHIFT 4
33 #define AMDGPU_TILING_PIPE_CONFIG_MASK 0x1f
34 #define AMDGPU_TILING_TILE_SPLIT_SHIFT 9
35 #define AMDGPU_TILING_TILE_SPLIT_MASK 0x7
36 #define AMDGPU_TILING_MICRO_TILE_MODE_SHIFT 12
37 #define AMDGPU_TILING_MICRO_TILE_MODE_MASK 0x7
38 #define AMDGPU_TILING_BANK_WIDTH_SHIFT 15
39 #define AMDGPU_TILING_BANK_WIDTH_MASK 0x3
40 #define AMDGPU_TILING_BANK_HEIGHT_SHIFT 17
41 #define AMDGPU_TILING_BANK_HEIGHT_MASK 0x3
42 #define AMDGPU_TILING_MACRO_TILE_ASPECT_SHIFT 19
43 #define AMDGPU_TILING_MACRO_TILE_ASPECT_MASK 0x3
44 #define AMDGPU_TILING_NUM_BANKS_SHIFT 21
45 #define AMDGPU_TILING_NUM_BANKS_MASK 0x3
46 #define AMDGPU_TILING_SWIZZLE_MODE_SHIFT 0
47 #define AMDGPU_TILING_SWIZZLE_MODE_MASK 0x1f
48 #define AMDGPU_TILING_DCC_OFFSET_256B_SHIFT 5
49 #define AMDGPU_TILING_DCC_OFFSET_256B_MASK 0xFFFFFF
50 #define AMDGPU_TILING_DCC_PITCH_MAX_SHIFT 29
51 #define AMDGPU_TILING_DCC_PITCH_MAX_MASK 0x3FFF
52 #define AMDGPU_TILING_DCC_INDEPENDENT_64B_SHIFT 43
53 #define AMDGPU_TILING_DCC_INDEPENDENT_64B_MASK 0x1
54 #define AMDGPU_TILING_DCC_INDEPENDENT_128B_SHIFT 44
55 #define AMDGPU_TILING_DCC_INDEPENDENT_128B_MASK 0x1
56 #define AMDGPU_TILING_SCANOUT_SHIFT 63
57 #define AMDGPU_TILING_SCANOUT_MASK 0x1
58 #define AMDGPU_TILING_GFX12_SWIZZLE_MODE_SHIFT 0
59 #define AMDGPU_TILING_GFX12_SWIZZLE_MODE_MASK 0x7
60 #define AMDGPU_TILING_GFX12_SCANOUT_SHIFT 63
61 #define AMDGPU_TILING_GFX12_SCANOUT_MASK 0x1
62 #define AMDGPU_TILING_GFX12_DCC_MAX_COMPRESSED_BLOCK_SHIFT 3
63 #define AMDGPU_TILING_GFX12_DCC_MAX_COMPRESSED_BLOCK_MASK 0x3
64 #define AMDGPU_TILING_GFX12_DCC_NUMBER_TYPE_SHIFT 5
65 #define AMDGPU_TILING_GFX12_DCC_NUMBER_TYPE_MASK 0x7
66 #define AMDGPU_TILING_GFX12_DCC_DATA_FORMAT_SHIFT 8
67 #define AMDGPU_TILING_GFX12_DCC_DATA_FORMAT_MASK 0x3f
68 #define AMDGPU_TILING_SET(field, value) \
69 (((__u64)(value) & AMDGPU_TILING_##field##_MASK) << AMDGPU_TILING_##field##_SHIFT)
70 #define AMDGPU_TILING_GET(value, field) \
71 (((__u64)(value) >> AMDGPU_TILING_##field##_SHIFT) & AMDGPU_TILING_##field##_MASK)
72 #else
73 #include "drm-uapi/amdgpu_drm.h"
74 #endif
75
76 #ifndef CIASICIDGFXENGINE_SOUTHERNISLAND
77 #define CIASICIDGFXENGINE_SOUTHERNISLAND 0x0000000A
78 #endif
79
80 #ifndef CIASICIDGFXENGINE_ARCTICISLAND
81 #define CIASICIDGFXENGINE_ARCTICISLAND 0x0000000D
82 #endif
83
84 struct ac_addrlib {
85 ADDR_HANDLE handle;
86 simple_mtx_t lock;
87 };
88
ac_pipe_config_to_num_pipes(unsigned pipe_config)89 unsigned ac_pipe_config_to_num_pipes(unsigned pipe_config)
90 {
91 switch (pipe_config) {
92 case V_009910_ADDR_SURF_P2:
93 return 2;
94 case V_009910_ADDR_SURF_P4_8x16:
95 case V_009910_ADDR_SURF_P4_16x16:
96 case V_009910_ADDR_SURF_P4_16x32:
97 case V_009910_ADDR_SURF_P4_32x32:
98 return 4;
99 case V_009910_ADDR_SURF_P8_16x16_8x16:
100 case V_009910_ADDR_SURF_P8_16x32_8x16:
101 case V_009910_ADDR_SURF_P8_32x32_8x16:
102 case V_009910_ADDR_SURF_P8_16x32_16x16:
103 case V_009910_ADDR_SURF_P8_32x32_16x16:
104 case V_009910_ADDR_SURF_P8_32x32_16x32:
105 case V_009910_ADDR_SURF_P8_32x64_32x32:
106 return 8;
107 case V_009910_ADDR_SURF_P16_32x32_8x16:
108 case V_009910_ADDR_SURF_P16_32x32_16x16:
109 return 16;
110 default:
111 unreachable("invalid pipe_config");
112 }
113 }
114
ac_modifier_has_dcc(uint64_t modifier)115 bool ac_modifier_has_dcc(uint64_t modifier)
116 {
117 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
118 }
119
ac_modifier_has_dcc_retile(uint64_t modifier)120 bool ac_modifier_has_dcc_retile(uint64_t modifier)
121 {
122 return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC_RETILE, modifier);
123 }
124
ac_modifier_supports_dcc_image_stores(enum amd_gfx_level gfx_level,uint64_t modifier)125 bool ac_modifier_supports_dcc_image_stores(enum amd_gfx_level gfx_level, uint64_t modifier)
126 {
127 if (!ac_modifier_has_dcc(modifier))
128 return false;
129
130 if (gfx_level >= GFX12)
131 return true;
132
133 return (!AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier) &&
134 AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier) &&
135 AMD_FMT_MOD_GET(DCC_MAX_COMPRESSED_BLOCK, modifier) == AMD_FMT_MOD_DCC_BLOCK_128B) ||
136 (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS && /* gfx10.3 */
137 AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier) &&
138 AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier) &&
139 AMD_FMT_MOD_GET(DCC_MAX_COMPRESSED_BLOCK, modifier) == AMD_FMT_MOD_DCC_BLOCK_64B) ||
140 (gfx_level >= GFX11_5 &&
141 AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX11 &&
142 !AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier) &&
143 AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier) &&
144 AMD_FMT_MOD_GET(DCC_MAX_COMPRESSED_BLOCK, modifier) == AMD_FMT_MOD_DCC_BLOCK_256B);
145
146 }
147
148
ac_surface_supports_dcc_image_stores(enum amd_gfx_level gfx_level,const struct radeon_surf * surf)149 bool ac_surface_supports_dcc_image_stores(enum amd_gfx_level gfx_level,
150 const struct radeon_surf *surf)
151 {
152 /* DCC image stores is only available for GFX10+. */
153 if (gfx_level < GFX10)
154 return false;
155
156 if (gfx_level >= GFX12)
157 return true;
158
159 /* DCC image stores support the following settings:
160 * - INDEPENDENT_64B_BLOCKS = 0
161 * - INDEPENDENT_128B_BLOCKS = 1
162 * - MAX_COMPRESSED_BLOCK_SIZE = 128B
163 * - MAX_UNCOMPRESSED_BLOCK_SIZE = 256B (always used)
164 *
165 * gfx10.3 also supports the following setting:
166 * - INDEPENDENT_64B_BLOCKS = 1
167 * - INDEPENDENT_128B_BLOCKS = 1
168 * - MAX_COMPRESSED_BLOCK_SIZE = 64B
169 * - MAX_UNCOMPRESSED_BLOCK_SIZE = 256B (always used)
170 *
171 * gfx11.5 also supports the following:
172 * - INDEPENDENT_64B_BLOCKS = 0
173 * - INDEPENDENT_128B_BLOCKS = 1
174 * - MAX_COMPRESSED_BLOCK_SIZE = 256B
175 * - MAX_UNCOMPRESSED_BLOCK_SIZE = 256B (always used)
176 *
177 * The compressor only looks at MAX_COMPRESSED_BLOCK_SIZE to determine
178 * the INDEPENDENT_xx_BLOCKS settings. 128B implies INDEP_128B, while 64B
179 * implies INDEP_64B && INDEP_128B.
180 *
181 * The same limitations apply to SDMA compressed stores because
182 * SDMA uses the same DCC codec.
183 */
184 return (!surf->u.gfx9.color.dcc.independent_64B_blocks &&
185 surf->u.gfx9.color.dcc.independent_128B_blocks &&
186 surf->u.gfx9.color.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_128B) ||
187 (gfx_level >= GFX10_3 && /* gfx10.3 - old 64B compression */
188 surf->u.gfx9.color.dcc.independent_64B_blocks &&
189 surf->u.gfx9.color.dcc.independent_128B_blocks &&
190 surf->u.gfx9.color.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_64B) ||
191 (gfx_level >= GFX11_5 && /* gfx11.5 - new 256B compression */
192 !surf->u.gfx9.color.dcc.independent_64B_blocks &&
193 surf->u.gfx9.color.dcc.independent_128B_blocks &&
194 surf->u.gfx9.color.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_256B);
195 }
196
ac_get_modifier_swizzle_mode(enum amd_gfx_level gfx_level,uint64_t modifier)197 static unsigned ac_get_modifier_swizzle_mode(enum amd_gfx_level gfx_level, uint64_t modifier)
198 {
199 if (modifier == DRM_FORMAT_MOD_LINEAR)
200 return ADDR_SW_LINEAR;
201
202 if (gfx_level >= GFX12 &&
203 AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX11) {
204 /* The Gfx11 swizzle mode needs to be translated to Gfx12. */
205 if (AMD_FMT_MOD_GET(TILE, modifier) == AMD_FMT_MOD_TILE_GFX9_64K_D)
206 return AMD_FMT_MOD_TILE_GFX12_64K_2D;
207
208 assert(0);
209 return ADDR_SW_MAX_TYPE; /* can't translate */
210 }
211
212 return AMD_FMT_MOD_GET(TILE, modifier);
213 }
214
215 static void
ac_modifier_fill_dcc_params(uint64_t modifier,struct radeon_surf * surf,ADDR2_COMPUTE_SURFACE_INFO_INPUT * surf_info)216 ac_modifier_fill_dcc_params(uint64_t modifier, struct radeon_surf *surf,
217 ADDR2_COMPUTE_SURFACE_INFO_INPUT *surf_info)
218 {
219 assert(ac_modifier_has_dcc(modifier));
220 assert(AMD_FMT_MOD_GET(TILE_VERSION, modifier) < AMD_FMT_MOD_TILE_VER_GFX12);
221
222 if (AMD_FMT_MOD_GET(DCC_RETILE, modifier)) {
223 surf_info->flags.metaPipeUnaligned = 0;
224 } else {
225 surf_info->flags.metaPipeUnaligned = !AMD_FMT_MOD_GET(DCC_PIPE_ALIGN, modifier);
226 }
227
228 /* The metaPipeUnaligned is not strictly necessary, but ensure we don't set metaRbUnaligned on
229 * non-displayable DCC surfaces just because num_render_backends = 1 */
230 surf_info->flags.metaRbUnaligned = AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
231 AMD_FMT_MOD_GET(RB, modifier) == 0 &&
232 surf_info->flags.metaPipeUnaligned;
233
234 surf->u.gfx9.color.dcc.independent_64B_blocks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
235 surf->u.gfx9.color.dcc.independent_128B_blocks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
236 surf->u.gfx9.color.dcc.max_compressed_block_size = AMD_FMT_MOD_GET(DCC_MAX_COMPRESSED_BLOCK, modifier);
237 }
238
ac_is_modifier_supported(const struct radeon_info * info,const struct ac_modifier_options * options,enum pipe_format format,uint64_t modifier)239 bool ac_is_modifier_supported(const struct radeon_info *info,
240 const struct ac_modifier_options *options,
241 enum pipe_format format,
242 uint64_t modifier)
243 {
244
245 if (util_format_is_compressed(format) ||
246 util_format_is_depth_or_stencil(format) ||
247 util_format_get_blocksizebits(format) > 64)
248 return false;
249
250 if (info->gfx_level < GFX9)
251 return false;
252
253 if(modifier == DRM_FORMAT_MOD_LINEAR)
254 return true;
255
256 /* GFX8 may need a different modifier for each plane */
257 if (info->gfx_level < GFX9 && util_format_get_num_planes(format) > 1)
258 return false;
259
260 uint32_t allowed_swizzles = 0xFFFFFFFF;
261 switch(info->gfx_level) {
262 case GFX9:
263 allowed_swizzles = ac_modifier_has_dcc(modifier) ? 0x06000000 : 0x06660660;
264 break;
265 case GFX10:
266 case GFX10_3:
267 allowed_swizzles = ac_modifier_has_dcc(modifier) ? 0x08000000 : 0x0E660660;
268 break;
269 case GFX11:
270 case GFX11_5:
271 allowed_swizzles = ac_modifier_has_dcc(modifier) ? 0x88000000 : 0xCC440440;
272 break;
273 case GFX12:
274 allowed_swizzles = 0x1E; /* all 2D swizzle modes */
275 break;
276 default:
277 return false;
278 }
279
280 if (!((1u << ac_get_modifier_swizzle_mode(info->gfx_level, modifier)) & allowed_swizzles))
281 return false;
282
283 if (ac_modifier_has_dcc(modifier)) {
284 /* TODO: support multi-planar formats with DCC */
285 if (util_format_get_num_planes(format) > 1)
286 return false;
287
288 if (!info->has_graphics)
289 return false;
290
291 if (!options->dcc)
292 return false;
293
294 if (ac_modifier_has_dcc_retile(modifier)) {
295 /* radeonsi and radv retiling shaders only support bpe == 32. */
296 if (util_format_get_blocksizebits(format) != 32)
297 return false;
298 if (!info->use_display_dcc_with_retile_blit || !options->dcc_retile)
299 return false;
300 }
301 }
302
303 return true;
304 }
305
ac_get_supported_modifiers(const struct radeon_info * info,const struct ac_modifier_options * options,enum pipe_format format,unsigned * mod_count,uint64_t * mods)306 bool ac_get_supported_modifiers(const struct radeon_info *info,
307 const struct ac_modifier_options *options,
308 enum pipe_format format,
309 unsigned *mod_count,
310 uint64_t *mods)
311 {
312 unsigned current_mod = 0;
313
314 #define ADD_MOD(name) \
315 if (ac_is_modifier_supported(info, options, format, (name))) { \
316 if (mods && current_mod < *mod_count) \
317 mods[current_mod] = (name); \
318 ++current_mod; \
319 }
320
321 /* The modifiers have to be added in descending order of estimated
322 * performance. The drivers will prefer modifiers that come earlier
323 * in the list. */
324 switch (info->gfx_level) {
325 case GFX9: {
326 unsigned pipe_xor_bits = MIN2(G_0098F8_NUM_PIPES(info->gb_addr_config) +
327 G_0098F8_NUM_SHADER_ENGINES_GFX9(info->gb_addr_config), 8);
328 unsigned bank_xor_bits = MIN2(G_0098F8_NUM_BANKS(info->gb_addr_config), 8 - pipe_xor_bits);
329 unsigned pipes = G_0098F8_NUM_PIPES(info->gb_addr_config);
330 unsigned rb = G_0098F8_NUM_RB_PER_SE(info->gb_addr_config) +
331 G_0098F8_NUM_SHADER_ENGINES_GFX9(info->gb_addr_config);
332
333 uint64_t common_dcc = AMD_FMT_MOD_SET(DCC, 1) |
334 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
335 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
336 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, info->has_dcc_constant_encode) |
337 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
338 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits);
339
340 ADD_MOD(AMD_FMT_MOD |
341 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
342 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
343 AMD_FMT_MOD_SET(DCC_PIPE_ALIGN, 1) |
344 common_dcc |
345 AMD_FMT_MOD_SET(PIPE, pipes) |
346 AMD_FMT_MOD_SET(RB, rb))
347
348 ADD_MOD(AMD_FMT_MOD |
349 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
350 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
351 AMD_FMT_MOD_SET(DCC_PIPE_ALIGN, 1) |
352 common_dcc |
353 AMD_FMT_MOD_SET(PIPE, pipes) |
354 AMD_FMT_MOD_SET(RB, rb))
355
356 if (util_format_get_blocksizebits(format) == 32) {
357 if (info->max_render_backends == 1) {
358 ADD_MOD(AMD_FMT_MOD |
359 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
360 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
361 common_dcc);
362 }
363
364
365 ADD_MOD(AMD_FMT_MOD |
366 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
367 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
368 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
369 common_dcc |
370 AMD_FMT_MOD_SET(PIPE, pipes) |
371 AMD_FMT_MOD_SET(RB, rb))
372 }
373
374
375 ADD_MOD(AMD_FMT_MOD |
376 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
377 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
378 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
379 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
380
381 ADD_MOD(AMD_FMT_MOD |
382 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
383 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
384 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
385 AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
386
387 ADD_MOD(AMD_FMT_MOD |
388 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
389 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
390
391 ADD_MOD(AMD_FMT_MOD |
392 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
393 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
394
395 ADD_MOD(DRM_FORMAT_MOD_LINEAR)
396 break;
397 }
398 case GFX10:
399 case GFX10_3: {
400 bool rbplus = info->gfx_level >= GFX10_3;
401 unsigned pipe_xor_bits = G_0098F8_NUM_PIPES(info->gb_addr_config);
402 unsigned pkrs = rbplus ? G_0098F8_NUM_PKRS(info->gb_addr_config) : 0;
403
404 unsigned version = rbplus ? AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS : AMD_FMT_MOD_TILE_VER_GFX10;
405 uint64_t common_dcc = AMD_FMT_MOD_SET(TILE_VERSION, version) |
406 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
407 AMD_FMT_MOD_SET(DCC, 1) |
408 AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
409 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
410 AMD_FMT_MOD_SET(PACKERS, pkrs);
411
412 ADD_MOD(AMD_FMT_MOD | common_dcc |
413 AMD_FMT_MOD_SET(DCC_PIPE_ALIGN, 1) |
414 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
415 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B))
416
417 if (info->gfx_level >= GFX10_3) {
418 ADD_MOD(AMD_FMT_MOD | common_dcc |
419 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
420 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
421 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B))
422
423 ADD_MOD(AMD_FMT_MOD | common_dcc |
424 AMD_FMT_MOD_SET(DCC_RETILE, 1) |
425 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
426 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
427 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B))
428 }
429
430 ADD_MOD(AMD_FMT_MOD |
431 AMD_FMT_MOD_SET(TILE_VERSION, version) |
432 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
433 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
434 AMD_FMT_MOD_SET(PACKERS, pkrs))
435
436 ADD_MOD(AMD_FMT_MOD |
437 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
438 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
439 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits))
440
441 if (util_format_get_blocksizebits(format) != 32) {
442 ADD_MOD(AMD_FMT_MOD |
443 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
444 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
445 }
446
447 ADD_MOD(AMD_FMT_MOD |
448 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
449 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
450
451 ADD_MOD(DRM_FORMAT_MOD_LINEAR)
452 break;
453 }
454 case GFX11:
455 case GFX11_5: {
456 /* GFX11 has new microblock organization. No S modes for 2D. */
457 unsigned pipe_xor_bits = G_0098F8_NUM_PIPES(info->gb_addr_config);
458 unsigned pkrs = G_0098F8_NUM_PKRS(info->gb_addr_config);
459 unsigned num_pipes = 1 << pipe_xor_bits;
460
461 /* R_X swizzle modes are the best for rendering and DCC requires them. */
462 for (unsigned i = 0; i < 2; i++) {
463 unsigned swizzle_r_x;
464
465 /* Insert the best one first. */
466 if (num_pipes > 16)
467 swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX11_256K_R_X : AMD_FMT_MOD_TILE_GFX9_64K_R_X;
468 else
469 swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX9_64K_R_X : AMD_FMT_MOD_TILE_GFX11_256K_R_X;
470
471 /* Disable 256K on APUs because it doesn't work with DAL. */
472 if (!info->has_dedicated_vram && swizzle_r_x == AMD_FMT_MOD_TILE_GFX11_256K_R_X)
473 continue;
474
475 uint64_t modifier_r_x = AMD_FMT_MOD |
476 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
477 AMD_FMT_MOD_SET(TILE, swizzle_r_x) |
478 AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
479 AMD_FMT_MOD_SET(PACKERS, pkrs);
480
481 /* DCC_CONSTANT_ENCODE is not set because it can't vary with gfx11 (it's implied to be 1). */
482 uint64_t modifier_dcc_best_gfx11_5 = modifier_r_x |
483 AMD_FMT_MOD_SET(DCC, 1) |
484 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) |
485 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
486 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_256B);
487
488 uint64_t modifier_dcc_best = modifier_r_x |
489 AMD_FMT_MOD_SET(DCC, 1) |
490 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) |
491 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
492 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B);
493
494 /* DCC settings for 4K and greater resolutions. (required by display hw) */
495 uint64_t modifier_dcc_4k = modifier_r_x |
496 AMD_FMT_MOD_SET(DCC, 1) |
497 AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
498 AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
499 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B);
500
501 /* Modifiers have to be sorted from best to worst.
502 *
503 * Top level order:
504 * 1. The best chip-specific modifiers with DCC, potentially non-displayable.
505 * 2. Chip-specific displayable modifiers with DCC.
506 * 3. Chip-specific displayable modifiers without DCC.
507 * 4. Chip-independent modifiers without DCC.
508 * 5. Linear.
509 */
510
511 /* Add the best non-displayable modifier first. */
512 if (info->gfx_level == GFX11_5)
513 ADD_MOD(modifier_dcc_best_gfx11_5 | AMD_FMT_MOD_SET(DCC_PIPE_ALIGN, 1));
514
515 ADD_MOD(modifier_dcc_best | AMD_FMT_MOD_SET(DCC_PIPE_ALIGN, 1));
516
517 /* Displayable modifiers are next. */
518 /* Add other displayable DCC settings. (DCC_RETILE implies displayable on all chips) */
519 ADD_MOD(modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1))
520 ADD_MOD(modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1))
521
522 /* Add one without DCC that is displayable (it's also optimal for non-displayable cases). */
523 ADD_MOD(modifier_r_x)
524 }
525
526 /* Add one that is compatible with other gfx11 chips. */
527 ADD_MOD(AMD_FMT_MOD |
528 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
529 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D))
530
531 /* Linear must be last. */
532 ADD_MOD(DRM_FORMAT_MOD_LINEAR)
533 break;
534 }
535 case GFX12: {
536 /* Chip properties no longer affect tiling, and there is no distinction between displayable
537 * and non-displayable anymore. (DCC settings may affect displayability though)
538 *
539 * Only declare 64K modifiers for now.
540 */
541 uint64_t mod_gfx12 = AMD_FMT_MOD |
542 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX12);
543
544 uint64_t mod_256K_2D = mod_gfx12 | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_256K_2D);
545 uint64_t mod_64K_2D = mod_gfx12 | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_64K_2D);
546 uint64_t mod_4K_2D = mod_gfx12 | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_4K_2D);
547 uint64_t mod_256B_2D = mod_gfx12 | AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX12_256B_2D);
548
549 /* This is identical to GFX12_64K_2D, but expressed in terms of VER_GFX11. */
550 uint64_t mod_64K_2D_as_gfx11 = AMD_FMT_MOD |
551 AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) |
552 AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D);
553
554 /* Expose both all compressed blocks. */
555 uint64_t dcc_256B = AMD_FMT_MOD_SET(DCC, 1) |
556 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_256B);
557 uint64_t dcc_128B = AMD_FMT_MOD_SET(DCC, 1) |
558 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B);
559 uint64_t dcc_64B = AMD_FMT_MOD_SET(DCC, 1) |
560 AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B);
561
562 /* Modifiers must be sorted from best to worst. */
563 ADD_MOD(mod_64K_2D | dcc_256B) /* 64K with DCC and 256B compressed blocks */
564 ADD_MOD(mod_64K_2D | dcc_128B) /* 64K with DCC and 128B compressed blocks */
565 ADD_MOD(mod_64K_2D | dcc_64B) /* 64K with DCC and 64B compressed blocks */
566 ADD_MOD(mod_256K_2D | dcc_256B) /* OpenGL exported modifier */
567 ADD_MOD(mod_4K_2D | dcc_256B) /* OpenGL exported modifier */
568 ADD_MOD(mod_256B_2D | dcc_256B) /* OpenGL exported modifier */
569 /* Without DCC is last. */
570 ADD_MOD(mod_64K_2D) /* 64K without DCC */
571 ADD_MOD(mod_64K_2D_as_gfx11) /* the same as above, but for gfx11 interop */
572 ADD_MOD(mod_256B_2D)
573 ADD_MOD(DRM_FORMAT_MOD_LINEAR)
574 break;
575 }
576 default:
577 break;
578 }
579
580 #undef ADD_MOD
581
582 if (!mods) {
583 *mod_count = current_mod;
584 return true;
585 }
586
587 bool complete = current_mod <= *mod_count;
588 *mod_count = MIN2(*mod_count, current_mod);
589 return complete;
590 }
591
allocSysMem(const ADDR_ALLOCSYSMEM_INPUT * pInput)592 static void *ADDR_API allocSysMem(const ADDR_ALLOCSYSMEM_INPUT *pInput)
593 {
594 return malloc(pInput->sizeInBytes);
595 }
596
freeSysMem(const ADDR_FREESYSMEM_INPUT * pInput)597 static ADDR_E_RETURNCODE ADDR_API freeSysMem(const ADDR_FREESYSMEM_INPUT *pInput)
598 {
599 free(pInput->pVirtAddr);
600 return ADDR_OK;
601 }
602
ac_addrlib_create(const struct radeon_info * info,uint64_t * max_alignment)603 struct ac_addrlib *ac_addrlib_create(const struct radeon_info *info,
604 uint64_t *max_alignment)
605 {
606 ADDR_CREATE_INPUT addrCreateInput = {0};
607 ADDR_CREATE_OUTPUT addrCreateOutput = {0};
608 ADDR_REGISTER_VALUE regValue = {0};
609 ADDR_CREATE_FLAGS createFlags = {{0}};
610 ADDR_GET_MAX_ALIGNMENTS_OUTPUT addrGetMaxAlignmentsOutput = {0};
611 ADDR_E_RETURNCODE addrRet;
612
613 addrCreateInput.size = sizeof(ADDR_CREATE_INPUT);
614 addrCreateOutput.size = sizeof(ADDR_CREATE_OUTPUT);
615
616 regValue.gbAddrConfig = info->gb_addr_config;
617 createFlags.value = 0;
618
619 addrCreateInput.chipFamily = info->family_id;
620 addrCreateInput.chipRevision = info->chip_external_rev;
621
622 if (addrCreateInput.chipFamily == FAMILY_UNKNOWN)
623 return NULL;
624
625 if (addrCreateInput.chipFamily >= FAMILY_AI) {
626 addrCreateInput.chipEngine = CIASICIDGFXENGINE_ARCTICISLAND;
627 } else {
628 regValue.noOfBanks = info->mc_arb_ramcfg & 0x3;
629 regValue.noOfRanks = (info->mc_arb_ramcfg & 0x4) >> 2;
630
631 regValue.backendDisables = info->enabled_rb_mask;
632 regValue.pTileConfig = info->si_tile_mode_array;
633 regValue.noOfEntries = ARRAY_SIZE(info->si_tile_mode_array);
634 if (addrCreateInput.chipFamily == FAMILY_SI) {
635 regValue.pMacroTileConfig = NULL;
636 regValue.noOfMacroEntries = 0;
637 } else {
638 regValue.pMacroTileConfig = info->cik_macrotile_mode_array;
639 regValue.noOfMacroEntries = ARRAY_SIZE(info->cik_macrotile_mode_array);
640 }
641
642 createFlags.useTileIndex = 1;
643 createFlags.useHtileSliceAlign = 1;
644
645 addrCreateInput.chipEngine = CIASICIDGFXENGINE_SOUTHERNISLAND;
646 }
647
648 addrCreateInput.callbacks.allocSysMem = allocSysMem;
649 addrCreateInput.callbacks.freeSysMem = freeSysMem;
650 addrCreateInput.callbacks.debugPrint = 0;
651 addrCreateInput.createFlags = createFlags;
652 addrCreateInput.regValue = regValue;
653
654 addrRet = AddrCreate(&addrCreateInput, &addrCreateOutput);
655 if (addrRet != ADDR_OK)
656 return NULL;
657
658 if (max_alignment) {
659 addrRet = AddrGetMaxAlignments(addrCreateOutput.hLib, &addrGetMaxAlignmentsOutput);
660 if (addrRet == ADDR_OK) {
661 *max_alignment = addrGetMaxAlignmentsOutput.baseAlign;
662 }
663 }
664
665 struct ac_addrlib *addrlib = calloc(1, sizeof(struct ac_addrlib));
666 if (!addrlib) {
667 AddrDestroy(addrCreateOutput.hLib);
668 return NULL;
669 }
670
671 addrlib->handle = addrCreateOutput.hLib;
672 simple_mtx_init(&addrlib->lock, mtx_plain);
673 return addrlib;
674 }
675
ac_addrlib_destroy(struct ac_addrlib * addrlib)676 void ac_addrlib_destroy(struct ac_addrlib *addrlib)
677 {
678 simple_mtx_destroy(&addrlib->lock);
679 AddrDestroy(addrlib->handle);
680 free(addrlib);
681 }
682
ac_addrlib_get_handle(struct ac_addrlib * addrlib)683 void *ac_addrlib_get_handle(struct ac_addrlib *addrlib)
684 {
685 return addrlib->handle;
686 }
687
surf_config_sanity(const struct ac_surf_config * config,unsigned flags)688 static int surf_config_sanity(const struct ac_surf_config *config, unsigned flags)
689 {
690 /* FMASK is allocated together with the color surface and can't be
691 * allocated separately.
692 */
693 assert(!(flags & RADEON_SURF_FMASK));
694 if (flags & RADEON_SURF_FMASK)
695 return -EINVAL;
696
697 /* all dimension must be at least 1 ! */
698 if (!config->info.width || !config->info.height || !config->info.depth ||
699 !config->info.array_size || !config->info.levels)
700 return -EINVAL;
701
702 switch (config->info.samples) {
703 case 0:
704 case 1:
705 case 2:
706 case 4:
707 case 8:
708 break;
709 case 16:
710 if (flags & RADEON_SURF_Z_OR_SBUFFER)
711 return -EINVAL;
712 break;
713 default:
714 return -EINVAL;
715 }
716
717 if (!(flags & RADEON_SURF_Z_OR_SBUFFER)) {
718 switch (config->info.storage_samples) {
719 case 0:
720 case 1:
721 case 2:
722 case 4:
723 case 8:
724 break;
725 default:
726 return -EINVAL;
727 }
728 }
729
730 if (config->is_3d && config->info.array_size > 1)
731 return -EINVAL;
732 if (config->is_cube && config->info.depth > 1)
733 return -EINVAL;
734
735 return 0;
736 }
737
bpe_to_format(struct radeon_surf * surf)738 static unsigned bpe_to_format(struct radeon_surf *surf)
739 {
740 if (surf->blk_w != 1 || surf->blk_h != 1) {
741 if (surf->blk_w == 4 && surf->blk_h == 4) {
742 switch (surf->bpe) {
743 case 8:
744 return ADDR_FMT_BC1;
745 case 16:
746 /* since BC3 and ASTC4x4 has same blk dimension and bpe reporting BC3 also for ASTC4x4.
747 * matching is fine since addrlib needs only blk_w, blk_h and bpe to compute surface
748 * properties.
749 * TODO: If compress_type can be passed to this function, then this ugly BC3 and ASTC4x4
750 * matching can be avoided.
751 */
752 return ADDR_FMT_BC3;
753 default:
754 unreachable("invalid compressed bpe");
755 }
756 } else if (surf->blk_w == 5 && surf->blk_h == 4)
757 return ADDR_FMT_ASTC_5x4;
758 else if (surf->blk_w == 5 && surf->blk_h == 5)
759 return ADDR_FMT_ASTC_5x5;
760 else if (surf->blk_w == 6 && surf->blk_h == 5)
761 return ADDR_FMT_ASTC_6x5;
762 else if (surf->blk_w == 6 && surf->blk_h == 6)
763 return ADDR_FMT_ASTC_6x6;
764 else if (surf->blk_w == 8 && surf->blk_h == 5)
765 return ADDR_FMT_ASTC_8x5;
766 else if (surf->blk_w == 8 && surf->blk_h == 6)
767 return ADDR_FMT_ASTC_8x6;
768 else if (surf->blk_w == 8 && surf->blk_h == 8)
769 return ADDR_FMT_ASTC_8x8;
770 else if (surf->blk_w == 10 && surf->blk_h == 5)
771 return ADDR_FMT_ASTC_10x5;
772 else if (surf->blk_w == 10 && surf->blk_h == 6)
773 return ADDR_FMT_ASTC_10x6;
774 else if (surf->blk_w == 10 && surf->blk_h == 8)
775 return ADDR_FMT_ASTC_10x8;
776 else if (surf->blk_w == 10 && surf->blk_h == 10)
777 return ADDR_FMT_ASTC_10x10;
778 else if (surf->blk_w == 12 && surf->blk_h == 10)
779 return ADDR_FMT_ASTC_12x10;
780 else if (surf->blk_w == 12 && surf->blk_h == 12)
781 return ADDR_FMT_ASTC_12x12;
782 } else {
783 switch (surf->bpe) {
784 case 1:
785 assert(!(surf->flags & RADEON_SURF_ZBUFFER));
786 return ADDR_FMT_8;
787 case 2:
788 assert(surf->flags & RADEON_SURF_ZBUFFER || !(surf->flags & RADEON_SURF_SBUFFER));
789 return ADDR_FMT_16;
790 case 4:
791 assert(surf->flags & RADEON_SURF_ZBUFFER || !(surf->flags & RADEON_SURF_SBUFFER));
792 return ADDR_FMT_32;
793 case 8:
794 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
795 return ADDR_FMT_32_32;
796 case 12:
797 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
798 return ADDR_FMT_32_32_32;
799 case 16:
800 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
801 return ADDR_FMT_32_32_32_32;
802 default:
803 unreachable("invalid bpe");
804 }
805 }
806 return ADDR_FMT_INVALID;
807 }
808
809 /* The addrlib pitch alignment is forced to this number for all chips to support interop
810 * between any 2 chips.
811 */
812 #define LINEAR_PITCH_ALIGNMENT 256
813
gfx6_compute_level(ADDR_HANDLE addrlib,const struct ac_surf_config * config,struct radeon_surf * surf,bool is_stencil,unsigned level,bool compressed,ADDR_COMPUTE_SURFACE_INFO_INPUT * AddrSurfInfoIn,ADDR_COMPUTE_SURFACE_INFO_OUTPUT * AddrSurfInfoOut,ADDR_COMPUTE_DCCINFO_INPUT * AddrDccIn,ADDR_COMPUTE_DCCINFO_OUTPUT * AddrDccOut,ADDR_COMPUTE_HTILE_INFO_INPUT * AddrHtileIn,ADDR_COMPUTE_HTILE_INFO_OUTPUT * AddrHtileOut)814 static int gfx6_compute_level(ADDR_HANDLE addrlib, const struct ac_surf_config *config,
815 struct radeon_surf *surf, bool is_stencil, unsigned level,
816 bool compressed, ADDR_COMPUTE_SURFACE_INFO_INPUT *AddrSurfInfoIn,
817 ADDR_COMPUTE_SURFACE_INFO_OUTPUT *AddrSurfInfoOut,
818 ADDR_COMPUTE_DCCINFO_INPUT *AddrDccIn,
819 ADDR_COMPUTE_DCCINFO_OUTPUT *AddrDccOut,
820 ADDR_COMPUTE_HTILE_INFO_INPUT *AddrHtileIn,
821 ADDR_COMPUTE_HTILE_INFO_OUTPUT *AddrHtileOut)
822 {
823 struct legacy_surf_level *surf_level;
824 struct legacy_surf_dcc_level *dcc_level;
825 ADDR_E_RETURNCODE ret;
826 bool mode_has_htile = false;
827
828 AddrSurfInfoIn->mipLevel = level;
829 AddrSurfInfoIn->width = u_minify(config->info.width, level);
830 AddrSurfInfoIn->height = u_minify(config->info.height, level);
831
832 /* Make GFX6 linear surfaces compatible with all chips for multi-GPU interop. */
833 if (config->info.levels == 1 && AddrSurfInfoIn->tileMode == ADDR_TM_LINEAR_ALIGNED &&
834 AddrSurfInfoIn->bpp && util_is_power_of_two_or_zero(AddrSurfInfoIn->bpp)) {
835 unsigned alignment = LINEAR_PITCH_ALIGNMENT / surf->bpe;
836
837 AddrSurfInfoIn->width = align(AddrSurfInfoIn->width, alignment);
838 }
839
840 /* addrlib assumes the bytes/pixel is a divisor of 64, which is not
841 * true for r32g32b32 formats. */
842 if (AddrSurfInfoIn->bpp == 96) {
843 assert(config->info.levels == 1);
844 assert(AddrSurfInfoIn->tileMode == ADDR_TM_LINEAR_ALIGNED);
845
846 /* The least common multiple of 64 bytes and 12 bytes/pixel is
847 * 192 bytes, or 16 pixels. */
848 AddrSurfInfoIn->width = align(AddrSurfInfoIn->width, 16);
849 }
850
851 if (config->is_3d)
852 AddrSurfInfoIn->numSlices = u_minify(config->info.depth, level);
853 else if (config->is_cube)
854 AddrSurfInfoIn->numSlices = 6;
855 else
856 AddrSurfInfoIn->numSlices = config->info.array_size;
857
858 if (level > 0) {
859 /* Set the base level pitch. This is needed for calculation
860 * of non-zero levels. */
861 if (is_stencil)
862 AddrSurfInfoIn->basePitch = surf->u.legacy.zs.stencil_level[0].nblk_x;
863 else
864 AddrSurfInfoIn->basePitch = surf->u.legacy.level[0].nblk_x;
865
866 /* Convert blocks to pixels for compressed formats. */
867 if (compressed)
868 AddrSurfInfoIn->basePitch *= surf->blk_w;
869 }
870
871 ret = AddrComputeSurfaceInfo(addrlib, AddrSurfInfoIn, AddrSurfInfoOut);
872 if (ret != ADDR_OK) {
873 return ret;
874 }
875
876 surf_level = is_stencil ? &surf->u.legacy.zs.stencil_level[level] : &surf->u.legacy.level[level];
877 dcc_level = &surf->u.legacy.color.dcc_level[level];
878 surf_level->offset_256B = align64(surf->surf_size, AddrSurfInfoOut->baseAlign) / 256;
879 surf_level->slice_size_dw = AddrSurfInfoOut->sliceSize / 4;
880 surf_level->nblk_x = AddrSurfInfoOut->pitch;
881 surf_level->nblk_y = AddrSurfInfoOut->height;
882
883 switch (AddrSurfInfoOut->tileMode) {
884 case ADDR_TM_LINEAR_ALIGNED:
885 surf_level->mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
886 break;
887 case ADDR_TM_1D_TILED_THIN1:
888 case ADDR_TM_1D_TILED_THICK:
889 case ADDR_TM_PRT_TILED_THIN1:
890 surf_level->mode = RADEON_SURF_MODE_1D;
891 break;
892 default:
893 surf_level->mode = RADEON_SURF_MODE_2D;
894 break;
895 }
896
897 if (is_stencil)
898 surf->u.legacy.zs.stencil_tiling_index[level] = AddrSurfInfoOut->tileIndex;
899 else
900 surf->u.legacy.tiling_index[level] = AddrSurfInfoOut->tileIndex;
901
902 if (AddrSurfInfoIn->flags.prt) {
903 if (level == 0) {
904 surf->prt_tile_width = AddrSurfInfoOut->pitchAlign;
905 surf->prt_tile_height = AddrSurfInfoOut->heightAlign;
906 surf->prt_tile_depth = AddrSurfInfoOut->depthAlign;
907 }
908 if (surf_level->nblk_x >= surf->prt_tile_width &&
909 surf_level->nblk_y >= surf->prt_tile_height) {
910 /* +1 because the current level is not in the miptail */
911 surf->first_mip_tail_level = level + 1;
912 }
913 }
914
915 surf->surf_size = (uint64_t)surf_level->offset_256B * 256 + AddrSurfInfoOut->surfSize;
916
917 /* Clear DCC fields at the beginning. */
918 if (!AddrSurfInfoIn->flags.depth && !AddrSurfInfoIn->flags.stencil)
919 dcc_level->dcc_offset = 0;
920
921 /* The previous level's flag tells us if we can use DCC for this level. */
922 if (AddrSurfInfoIn->flags.dccCompatible && (level == 0 || AddrDccOut->subLvlCompressible)) {
923 bool prev_level_clearable = level == 0 || AddrDccOut->dccRamSizeAligned;
924
925 AddrDccIn->colorSurfSize = AddrSurfInfoOut->surfSize;
926 AddrDccIn->tileMode = AddrSurfInfoOut->tileMode;
927 AddrDccIn->tileInfo = *AddrSurfInfoOut->pTileInfo;
928 AddrDccIn->tileIndex = AddrSurfInfoOut->tileIndex;
929 AddrDccIn->macroModeIndex = AddrSurfInfoOut->macroModeIndex;
930
931 ret = AddrComputeDccInfo(addrlib, AddrDccIn, AddrDccOut);
932
933 if (ret == ADDR_OK) {
934 dcc_level->dcc_offset = surf->meta_size;
935 surf->num_meta_levels = level + 1;
936 surf->meta_size = dcc_level->dcc_offset + AddrDccOut->dccRamSize;
937 surf->meta_alignment_log2 = MAX2(surf->meta_alignment_log2, util_logbase2(AddrDccOut->dccRamBaseAlign));
938
939 /* If the DCC size of a subresource (1 mip level or 1 slice)
940 * is not aligned, the DCC memory layout is not contiguous for
941 * that subresource, which means we can't use fast clear.
942 *
943 * We only do fast clears for whole mipmap levels. If we did
944 * per-slice fast clears, the same restriction would apply.
945 * (i.e. only compute the slice size and see if it's aligned)
946 *
947 * The last level can be non-contiguous and still be clearable
948 * if it's interleaved with the next level that doesn't exist.
949 */
950 if (AddrDccOut->dccRamSizeAligned ||
951 (prev_level_clearable && level == config->info.levels - 1))
952 dcc_level->dcc_fast_clear_size = AddrDccOut->dccFastClearSize;
953 else
954 dcc_level->dcc_fast_clear_size = 0;
955
956 /* Compute the DCC slice size because addrlib doesn't
957 * provide this info. As DCC memory is linear (each
958 * slice is the same size) it's easy to compute.
959 */
960 surf->meta_slice_size = AddrDccOut->dccRamSize / config->info.array_size;
961
962 /* For arrays, we have to compute the DCC info again
963 * with one slice size to get a correct fast clear
964 * size.
965 */
966 if (config->info.array_size > 1) {
967 AddrDccIn->colorSurfSize = AddrSurfInfoOut->sliceSize;
968 AddrDccIn->tileMode = AddrSurfInfoOut->tileMode;
969 AddrDccIn->tileInfo = *AddrSurfInfoOut->pTileInfo;
970 AddrDccIn->tileIndex = AddrSurfInfoOut->tileIndex;
971 AddrDccIn->macroModeIndex = AddrSurfInfoOut->macroModeIndex;
972
973 ret = AddrComputeDccInfo(addrlib, AddrDccIn, AddrDccOut);
974 if (ret == ADDR_OK) {
975 /* If the DCC memory isn't properly
976 * aligned, the data are interleaved
977 * across slices.
978 */
979 if (AddrDccOut->dccRamSizeAligned)
980 dcc_level->dcc_slice_fast_clear_size = AddrDccOut->dccFastClearSize;
981 else
982 dcc_level->dcc_slice_fast_clear_size = 0;
983 }
984
985 if (surf->flags & RADEON_SURF_CONTIGUOUS_DCC_LAYERS &&
986 surf->meta_slice_size != dcc_level->dcc_slice_fast_clear_size) {
987 surf->meta_size = 0;
988 surf->num_meta_levels = 0;
989 AddrDccOut->subLvlCompressible = false;
990 }
991 } else {
992 dcc_level->dcc_slice_fast_clear_size = dcc_level->dcc_fast_clear_size;
993 }
994 }
995 }
996
997 if (surf_level->mode == RADEON_SURF_MODE_2D)
998 mode_has_htile = true;
999 else if (surf_level->mode == RADEON_SURF_MODE_1D &&
1000 !(surf->flags & RADEON_SURF_TC_COMPATIBLE_HTILE))
1001 mode_has_htile = true;
1002
1003 /* HTILE. */
1004 if (!is_stencil && AddrSurfInfoIn->flags.depth && mode_has_htile &&
1005 level == 0 && !(surf->flags & RADEON_SURF_NO_HTILE)) {
1006 AddrHtileIn->flags.tcCompatible = AddrSurfInfoOut->tcCompatible;
1007 AddrHtileIn->pitch = AddrSurfInfoOut->pitch;
1008 AddrHtileIn->height = AddrSurfInfoOut->height;
1009 AddrHtileIn->numSlices = AddrSurfInfoOut->depth;
1010 AddrHtileIn->blockWidth = ADDR_HTILE_BLOCKSIZE_8;
1011 AddrHtileIn->blockHeight = ADDR_HTILE_BLOCKSIZE_8;
1012 AddrHtileIn->pTileInfo = AddrSurfInfoOut->pTileInfo;
1013 AddrHtileIn->tileIndex = AddrSurfInfoOut->tileIndex;
1014 AddrHtileIn->macroModeIndex = AddrSurfInfoOut->macroModeIndex;
1015
1016 ret = AddrComputeHtileInfo(addrlib, AddrHtileIn, AddrHtileOut);
1017
1018 if (ret == ADDR_OK) {
1019 surf->meta_size = AddrHtileOut->htileBytes;
1020 surf->meta_slice_size = AddrHtileOut->sliceSize;
1021 surf->meta_alignment_log2 = util_logbase2(AddrHtileOut->baseAlign);
1022 surf->meta_pitch = AddrHtileOut->pitch;
1023 surf->num_meta_levels = level + 1;
1024 }
1025 }
1026
1027 return 0;
1028 }
1029
gfx6_set_micro_tile_mode(struct radeon_surf * surf,const struct radeon_info * info)1030 static void gfx6_set_micro_tile_mode(struct radeon_surf *surf, const struct radeon_info *info)
1031 {
1032 uint32_t tile_mode = info->si_tile_mode_array[surf->u.legacy.tiling_index[0]];
1033
1034 if (info->gfx_level >= GFX7)
1035 surf->micro_tile_mode = G_009910_MICRO_TILE_MODE_NEW(tile_mode);
1036 else
1037 surf->micro_tile_mode = G_009910_MICRO_TILE_MODE(tile_mode);
1038 }
1039
cik_get_macro_tile_index(struct radeon_surf * surf)1040 static unsigned cik_get_macro_tile_index(struct radeon_surf *surf)
1041 {
1042 unsigned index, tileb;
1043
1044 tileb = 8 * 8 * surf->bpe;
1045 tileb = MIN2(surf->u.legacy.tile_split, tileb);
1046
1047 for (index = 0; tileb > 64; index++)
1048 tileb >>= 1;
1049
1050 assert(index < 16);
1051 return index;
1052 }
1053
get_display_flag(const struct ac_surf_config * config,const struct radeon_surf * surf)1054 static bool get_display_flag(const struct ac_surf_config *config, const struct radeon_surf *surf)
1055 {
1056 unsigned num_channels = config->info.num_channels;
1057 unsigned bpe = surf->bpe;
1058
1059 /* With modifiers the kernel is in charge of whether it is displayable.
1060 * We need to ensure at least 32 pixels pitch alignment, but this is
1061 * always the case when the blocksize >= 4K.
1062 */
1063 if (surf->modifier != DRM_FORMAT_MOD_INVALID)
1064 return false;
1065
1066 if (!config->is_1d && !config->is_3d && !config->is_cube &&
1067 !(surf->flags & RADEON_SURF_Z_OR_SBUFFER) &&
1068 surf->flags & RADEON_SURF_SCANOUT && config->info.samples <= 1 && surf->blk_w <= 2 &&
1069 surf->blk_h == 1) {
1070 /* subsampled */
1071 if (surf->blk_w == 2 && surf->blk_h == 1)
1072 return true;
1073
1074 if (/* RGBA8 or RGBA16F */
1075 (bpe >= 4 && bpe <= 8 && num_channels == 4) ||
1076 /* R5G6B5 or R5G5B5A1 */
1077 (bpe == 2 && num_channels >= 3) ||
1078 /* C8 palette */
1079 (bpe == 1 && num_channels == 1))
1080 return true;
1081 }
1082 return false;
1083 }
1084
1085 /**
1086 * This must be called after the first level is computed.
1087 *
1088 * Copy surface-global settings like pipe/bank config from level 0 surface
1089 * computation, and compute tile swizzle.
1090 */
gfx6_surface_settings(ADDR_HANDLE addrlib,const struct radeon_info * info,const struct ac_surf_config * config,ADDR_COMPUTE_SURFACE_INFO_OUTPUT * csio,struct radeon_surf * surf)1091 static int gfx6_surface_settings(ADDR_HANDLE addrlib, const struct radeon_info *info,
1092 const struct ac_surf_config *config,
1093 ADDR_COMPUTE_SURFACE_INFO_OUTPUT *csio, struct radeon_surf *surf)
1094 {
1095 surf->surf_alignment_log2 = util_logbase2(csio->baseAlign);
1096 surf->u.legacy.pipe_config = csio->pTileInfo->pipeConfig - 1;
1097 gfx6_set_micro_tile_mode(surf, info);
1098
1099 /* For 2D modes only. */
1100 if (csio->tileMode >= ADDR_TM_2D_TILED_THIN1) {
1101 surf->u.legacy.bankw = csio->pTileInfo->bankWidth;
1102 surf->u.legacy.bankh = csio->pTileInfo->bankHeight;
1103 surf->u.legacy.mtilea = csio->pTileInfo->macroAspectRatio;
1104 surf->u.legacy.tile_split = csio->pTileInfo->tileSplitBytes;
1105 surf->u.legacy.num_banks = csio->pTileInfo->banks;
1106 surf->u.legacy.macro_tile_index = csio->macroModeIndex;
1107 } else {
1108 surf->u.legacy.macro_tile_index = 0;
1109 }
1110
1111 /* Compute tile swizzle. */
1112 /* TODO: fix tile swizzle with mipmapping for GFX6 */
1113 if ((info->gfx_level >= GFX7 || config->info.levels == 1) && config->info.surf_index &&
1114 surf->u.legacy.level[0].mode == RADEON_SURF_MODE_2D &&
1115 !(surf->flags & (RADEON_SURF_Z_OR_SBUFFER | RADEON_SURF_SHAREABLE)) &&
1116 !get_display_flag(config, surf)) {
1117 ADDR_COMPUTE_BASE_SWIZZLE_INPUT AddrBaseSwizzleIn = {0};
1118 ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT AddrBaseSwizzleOut = {0};
1119
1120 AddrBaseSwizzleIn.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_INPUT);
1121 AddrBaseSwizzleOut.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT);
1122
1123 AddrBaseSwizzleIn.surfIndex = p_atomic_inc_return(config->info.surf_index) - 1;
1124 AddrBaseSwizzleIn.tileIndex = csio->tileIndex;
1125 AddrBaseSwizzleIn.macroModeIndex = csio->macroModeIndex;
1126 AddrBaseSwizzleIn.pTileInfo = csio->pTileInfo;
1127 AddrBaseSwizzleIn.tileMode = csio->tileMode;
1128
1129 int r = AddrComputeBaseSwizzle(addrlib, &AddrBaseSwizzleIn, &AddrBaseSwizzleOut);
1130 if (r != ADDR_OK)
1131 return r;
1132
1133 assert(AddrBaseSwizzleOut.tileSwizzle <=
1134 u_bit_consecutive(0, sizeof(surf->tile_swizzle) * 8));
1135 surf->tile_swizzle = AddrBaseSwizzleOut.tileSwizzle;
1136 }
1137 return 0;
1138 }
1139
ac_compute_cmask(const struct radeon_info * info,const struct ac_surf_config * config,struct radeon_surf * surf)1140 static void ac_compute_cmask(const struct radeon_info *info, const struct ac_surf_config *config,
1141 struct radeon_surf *surf)
1142 {
1143 unsigned pipe_interleave_bytes = info->pipe_interleave_bytes;
1144 unsigned num_pipes = info->num_tile_pipes;
1145 unsigned cl_width, cl_height;
1146
1147 if (surf->flags & RADEON_SURF_Z_OR_SBUFFER || surf->is_linear ||
1148 (config->info.samples >= 2 && !surf->fmask_size))
1149 return;
1150
1151 assert(info->gfx_level <= GFX8);
1152
1153 switch (num_pipes) {
1154 case 2:
1155 cl_width = 32;
1156 cl_height = 16;
1157 break;
1158 case 4:
1159 cl_width = 32;
1160 cl_height = 32;
1161 break;
1162 case 8:
1163 cl_width = 64;
1164 cl_height = 32;
1165 break;
1166 case 16: /* Hawaii */
1167 cl_width = 64;
1168 cl_height = 64;
1169 break;
1170 default:
1171 assert(0);
1172 return;
1173 }
1174
1175 unsigned base_align = num_pipes * pipe_interleave_bytes;
1176
1177 unsigned width = align(surf->u.legacy.level[0].nblk_x, cl_width * 8);
1178 unsigned height = align(surf->u.legacy.level[0].nblk_y, cl_height * 8);
1179 unsigned slice_elements = (width * height) / (8 * 8);
1180
1181 /* Each element of CMASK is a nibble. */
1182 unsigned slice_bytes = slice_elements / 2;
1183
1184 surf->u.legacy.color.cmask_slice_tile_max = (width * height) / (128 * 128);
1185 if (surf->u.legacy.color.cmask_slice_tile_max)
1186 surf->u.legacy.color.cmask_slice_tile_max -= 1;
1187
1188 unsigned num_layers;
1189 if (config->is_3d)
1190 num_layers = config->info.depth;
1191 else if (config->is_cube)
1192 num_layers = 6;
1193 else
1194 num_layers = config->info.array_size;
1195
1196 surf->cmask_alignment_log2 = util_logbase2(MAX2(256, base_align));
1197 surf->cmask_slice_size = align(slice_bytes, base_align);
1198 surf->cmask_size = surf->cmask_slice_size * num_layers;
1199 }
1200
1201 /**
1202 * Fill in the tiling information in \p surf based on the given surface config.
1203 *
1204 * The following fields of \p surf must be initialized by the caller:
1205 * blk_w, blk_h, bpe, flags.
1206 */
gfx6_compute_surface(ADDR_HANDLE addrlib,const struct radeon_info * info,const struct ac_surf_config * config,enum radeon_surf_mode mode,struct radeon_surf * surf)1207 static int gfx6_compute_surface(ADDR_HANDLE addrlib, const struct radeon_info *info,
1208 const struct ac_surf_config *config, enum radeon_surf_mode mode,
1209 struct radeon_surf *surf)
1210 {
1211 unsigned level;
1212 bool compressed;
1213 ADDR_COMPUTE_SURFACE_INFO_INPUT AddrSurfInfoIn = {0};
1214 ADDR_COMPUTE_SURFACE_INFO_OUTPUT AddrSurfInfoOut = {0};
1215 ADDR_COMPUTE_DCCINFO_INPUT AddrDccIn = {0};
1216 ADDR_COMPUTE_DCCINFO_OUTPUT AddrDccOut = {0};
1217 ADDR_COMPUTE_HTILE_INFO_INPUT AddrHtileIn = {0};
1218 ADDR_COMPUTE_HTILE_INFO_OUTPUT AddrHtileOut = {0};
1219 ADDR_TILEINFO AddrTileInfoIn = {0};
1220 ADDR_TILEINFO AddrTileInfoOut = {0};
1221 int r;
1222
1223 AddrSurfInfoIn.size = sizeof(ADDR_COMPUTE_SURFACE_INFO_INPUT);
1224 AddrSurfInfoOut.size = sizeof(ADDR_COMPUTE_SURFACE_INFO_OUTPUT);
1225 AddrDccIn.size = sizeof(ADDR_COMPUTE_DCCINFO_INPUT);
1226 AddrDccOut.size = sizeof(ADDR_COMPUTE_DCCINFO_OUTPUT);
1227 AddrHtileIn.size = sizeof(ADDR_COMPUTE_HTILE_INFO_INPUT);
1228 AddrHtileOut.size = sizeof(ADDR_COMPUTE_HTILE_INFO_OUTPUT);
1229 AddrSurfInfoOut.pTileInfo = &AddrTileInfoOut;
1230
1231 compressed = surf->blk_w == 4 && surf->blk_h == 4;
1232
1233 /* MSAA requires 2D tiling. */
1234 if (config->info.samples > 1)
1235 mode = RADEON_SURF_MODE_2D;
1236
1237 /* DB doesn't support linear layouts. */
1238 if (surf->flags & (RADEON_SURF_Z_OR_SBUFFER) && mode < RADEON_SURF_MODE_1D)
1239 mode = RADEON_SURF_MODE_1D;
1240
1241 /* Set the requested tiling mode. */
1242 switch (mode) {
1243 case RADEON_SURF_MODE_LINEAR_ALIGNED:
1244 AddrSurfInfoIn.tileMode = ADDR_TM_LINEAR_ALIGNED;
1245 break;
1246 case RADEON_SURF_MODE_1D:
1247 if (surf->flags & RADEON_SURF_PRT)
1248 AddrSurfInfoIn.tileMode = ADDR_TM_PRT_TILED_THIN1;
1249 else if (config->is_3d)
1250 AddrSurfInfoIn.tileMode = ADDR_TM_1D_TILED_THICK;
1251 else
1252 AddrSurfInfoIn.tileMode = ADDR_TM_1D_TILED_THIN1;
1253 break;
1254 case RADEON_SURF_MODE_2D:
1255 if (surf->flags & RADEON_SURF_PRT) {
1256 if (config->is_3d && surf->bpe < 8) {
1257 AddrSurfInfoIn.tileMode = ADDR_TM_PRT_2D_TILED_THICK;
1258 } else {
1259 AddrSurfInfoIn.tileMode = ADDR_TM_PRT_2D_TILED_THIN1;
1260 }
1261 } else {
1262 if (config->is_3d) {
1263 /* GFX6 doesn't have 3D_TILED_XTHICK. */
1264 if (info->gfx_level >= GFX7)
1265 AddrSurfInfoIn.tileMode = ADDR_TM_3D_TILED_XTHICK;
1266 else
1267 AddrSurfInfoIn.tileMode = ADDR_TM_2D_TILED_XTHICK;
1268 } else {
1269 AddrSurfInfoIn.tileMode = ADDR_TM_2D_TILED_THIN1;
1270 }
1271 }
1272 break;
1273 default:
1274 assert(0);
1275 }
1276
1277 AddrSurfInfoIn.format = bpe_to_format(surf);
1278 if (!compressed)
1279 AddrDccIn.bpp = AddrSurfInfoIn.bpp = surf->bpe * 8;
1280
1281 /* Setting ADDR_FMT_32_32_32 breaks gfx6-8, while INVALID works. */
1282 if (AddrSurfInfoIn.format == ADDR_FMT_32_32_32)
1283 AddrSurfInfoIn.format = ADDR_FMT_INVALID;
1284
1285 AddrDccIn.numSamples = AddrSurfInfoIn.numSamples = MAX2(1, config->info.samples);
1286 AddrSurfInfoIn.tileIndex = -1;
1287
1288 if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER)) {
1289 AddrDccIn.numSamples = AddrSurfInfoIn.numFrags = MAX2(1, config->info.storage_samples);
1290 }
1291
1292 /* Set the micro tile type. */
1293 if (surf->flags & RADEON_SURF_SCANOUT)
1294 AddrSurfInfoIn.tileType = ADDR_DISPLAYABLE;
1295 else if (surf->flags & RADEON_SURF_Z_OR_SBUFFER)
1296 AddrSurfInfoIn.tileType = ADDR_DEPTH_SAMPLE_ORDER;
1297 else
1298 AddrSurfInfoIn.tileType = ADDR_NON_DISPLAYABLE;
1299
1300 AddrSurfInfoIn.flags.color = !(surf->flags & RADEON_SURF_Z_OR_SBUFFER);
1301 AddrSurfInfoIn.flags.depth = (surf->flags & RADEON_SURF_ZBUFFER) != 0;
1302 AddrSurfInfoIn.flags.cube = config->is_cube;
1303 AddrSurfInfoIn.flags.display = get_display_flag(config, surf);
1304 AddrSurfInfoIn.flags.pow2Pad = config->info.levels > 1;
1305 AddrSurfInfoIn.flags.tcCompatible = (surf->flags & RADEON_SURF_TC_COMPATIBLE_HTILE) != 0;
1306 AddrSurfInfoIn.flags.prt = (surf->flags & RADEON_SURF_PRT) != 0;
1307
1308 /* Only degrade the tile mode for space if TC-compatible HTILE hasn't been
1309 * requested, because TC-compatible HTILE requires 2D tiling.
1310 */
1311 AddrSurfInfoIn.flags.opt4Space = !AddrSurfInfoIn.flags.tcCompatible && !config->is_3d &&
1312 !AddrSurfInfoIn.flags.fmask && config->info.samples <= 1 &&
1313 !(surf->flags & RADEON_SURF_FORCE_SWIZZLE_MODE);
1314
1315 /* DCC notes:
1316 * - If we add MSAA support, keep in mind that CB can't decompress 8bpp
1317 * with samples >= 4.
1318 * - Mipmapped array textures have low performance (discovered by a closed
1319 * driver team).
1320 */
1321 AddrSurfInfoIn.flags.dccCompatible =
1322 info->gfx_level >= GFX8 && info->has_graphics && /* disable DCC on compute-only chips */
1323 !(surf->flags & RADEON_SURF_Z_OR_SBUFFER) && !(surf->flags & RADEON_SURF_DISABLE_DCC) &&
1324 !compressed &&
1325 ((config->info.array_size == 1 && config->info.depth == 1) || config->info.levels == 1);
1326
1327 AddrSurfInfoIn.flags.noStencil =
1328 !(surf->flags & RADEON_SURF_SBUFFER) || (surf->flags & RADEON_SURF_NO_RENDER_TARGET);
1329
1330 AddrSurfInfoIn.flags.compressZ = !!(surf->flags & RADEON_SURF_Z_OR_SBUFFER);
1331
1332 /* On GFX7-GFX8, the DB uses the same pitch and tile mode (except tilesplit)
1333 * for Z and stencil. This can cause a number of problems which we work
1334 * around here:
1335 *
1336 * - a depth part that is incompatible with mipmapped texturing
1337 * - at least on Stoney, entirely incompatible Z/S aspects (e.g.
1338 * incorrect tiling applied to the stencil part, stencil buffer
1339 * memory accesses that go out of bounds) even without mipmapping
1340 *
1341 * Some piglit tests that are prone to different types of related
1342 * failures:
1343 * ./bin/ext_framebuffer_multisample-upsample 2 stencil
1344 * ./bin/framebuffer-blit-levels {draw,read} stencil
1345 * ./bin/ext_framebuffer_multisample-unaligned-blit N {depth,stencil} {msaa,upsample,downsample}
1346 * ./bin/fbo-depth-array fs-writes-{depth,stencil} / {depth,stencil}-{clear,layered-clear,draw}
1347 * ./bin/depthstencil-render-miplevels 1024 d=s=z24_s8
1348 */
1349 int stencil_tile_idx = -1;
1350
1351 if (AddrSurfInfoIn.flags.depth && !AddrSurfInfoIn.flags.noStencil &&
1352 (config->info.levels > 1 || info->family == CHIP_STONEY)) {
1353 /* Compute stencilTileIdx that is compatible with the (depth)
1354 * tileIdx. This degrades the depth surface if necessary to
1355 * ensure that a matching stencilTileIdx exists. */
1356 AddrSurfInfoIn.flags.matchStencilTileCfg = 1;
1357
1358 /* Keep the depth mip-tail compatible with texturing. */
1359 if (config->info.levels > 1 && !(surf->flags & RADEON_SURF_NO_STENCIL_ADJUST))
1360 AddrSurfInfoIn.flags.noStencil = 1;
1361 }
1362
1363 /* Set preferred macrotile parameters. This is usually required
1364 * for shared resources. This is for 2D tiling only. */
1365 if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER) &&
1366 AddrSurfInfoIn.tileMode >= ADDR_TM_2D_TILED_THIN1 && surf->u.legacy.bankw &&
1367 surf->u.legacy.bankh && surf->u.legacy.mtilea && surf->u.legacy.tile_split) {
1368 /* If any of these parameters are incorrect, the calculation
1369 * will fail. */
1370 AddrTileInfoIn.banks = surf->u.legacy.num_banks;
1371 AddrTileInfoIn.bankWidth = surf->u.legacy.bankw;
1372 AddrTileInfoIn.bankHeight = surf->u.legacy.bankh;
1373 AddrTileInfoIn.macroAspectRatio = surf->u.legacy.mtilea;
1374 AddrTileInfoIn.tileSplitBytes = surf->u.legacy.tile_split;
1375 AddrTileInfoIn.pipeConfig = surf->u.legacy.pipe_config + 1; /* +1 compared to GB_TILE_MODE */
1376 AddrSurfInfoIn.flags.opt4Space = 0;
1377 AddrSurfInfoIn.pTileInfo = &AddrTileInfoIn;
1378
1379 /* If AddrSurfInfoIn.pTileInfo is set, Addrlib doesn't set
1380 * the tile index, because we are expected to know it if
1381 * we know the other parameters.
1382 *
1383 * This is something that can easily be fixed in Addrlib.
1384 * For now, just figure it out here.
1385 * Note that only 2D_TILE_THIN1 is handled here.
1386 */
1387 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
1388 assert(AddrSurfInfoIn.tileMode == ADDR_TM_2D_TILED_THIN1);
1389
1390 if (info->gfx_level == GFX6) {
1391 if (AddrSurfInfoIn.tileType == ADDR_DISPLAYABLE) {
1392 if (surf->bpe == 2)
1393 AddrSurfInfoIn.tileIndex = 11; /* 16bpp */
1394 else
1395 AddrSurfInfoIn.tileIndex = 12; /* 32bpp */
1396 } else {
1397 if (surf->bpe == 1)
1398 AddrSurfInfoIn.tileIndex = 14; /* 8bpp */
1399 else if (surf->bpe == 2)
1400 AddrSurfInfoIn.tileIndex = 15; /* 16bpp */
1401 else if (surf->bpe == 4)
1402 AddrSurfInfoIn.tileIndex = 16; /* 32bpp */
1403 else
1404 AddrSurfInfoIn.tileIndex = 17; /* 64bpp (and 128bpp) */
1405 }
1406 } else {
1407 /* GFX7 - GFX8 */
1408 if (AddrSurfInfoIn.tileType == ADDR_DISPLAYABLE)
1409 AddrSurfInfoIn.tileIndex = 10; /* 2D displayable */
1410 else
1411 AddrSurfInfoIn.tileIndex = 14; /* 2D non-displayable */
1412
1413 /* Addrlib doesn't set this if tileIndex is forced like above. */
1414 AddrSurfInfoOut.macroModeIndex = cik_get_macro_tile_index(surf);
1415 }
1416 }
1417
1418 surf->has_stencil = !!(surf->flags & RADEON_SURF_SBUFFER);
1419 surf->num_meta_levels = 0;
1420 surf->surf_size = 0;
1421 surf->meta_size = 0;
1422 surf->meta_slice_size = 0;
1423 surf->meta_alignment_log2 = 0;
1424
1425 const bool only_stencil =
1426 (surf->flags & RADEON_SURF_SBUFFER) && !(surf->flags & RADEON_SURF_ZBUFFER);
1427
1428 /* Calculate texture layout information. */
1429 if (!only_stencil) {
1430 for (level = 0; level < config->info.levels; level++) {
1431 r = gfx6_compute_level(addrlib, config, surf, false, level, compressed, &AddrSurfInfoIn,
1432 &AddrSurfInfoOut, &AddrDccIn, &AddrDccOut, &AddrHtileIn,
1433 &AddrHtileOut);
1434 if (r)
1435 return r;
1436
1437 if (level > 0)
1438 continue;
1439
1440 if (!AddrSurfInfoOut.tcCompatible) {
1441 AddrSurfInfoIn.flags.tcCompatible = 0;
1442 surf->flags &= ~RADEON_SURF_TC_COMPATIBLE_HTILE;
1443 }
1444
1445 if (AddrSurfInfoIn.flags.matchStencilTileCfg) {
1446 AddrSurfInfoIn.flags.matchStencilTileCfg = 0;
1447 AddrSurfInfoIn.tileIndex = AddrSurfInfoOut.tileIndex;
1448 stencil_tile_idx = AddrSurfInfoOut.stencilTileIdx;
1449
1450 assert(stencil_tile_idx >= 0);
1451 }
1452
1453 r = gfx6_surface_settings(addrlib, info, config, &AddrSurfInfoOut, surf);
1454 if (r)
1455 return r;
1456 }
1457 }
1458
1459 /* Calculate texture layout information for stencil. */
1460 if (surf->flags & RADEON_SURF_SBUFFER) {
1461 AddrSurfInfoIn.tileIndex = stencil_tile_idx;
1462 AddrSurfInfoIn.bpp = 8;
1463 AddrSurfInfoIn.format = ADDR_FMT_8;
1464 AddrSurfInfoIn.flags.depth = 0;
1465 AddrSurfInfoIn.flags.stencil = 1;
1466 AddrSurfInfoIn.flags.tcCompatible = 0;
1467 /* This will be ignored if AddrSurfInfoIn.pTileInfo is NULL. */
1468 AddrTileInfoIn.tileSplitBytes = surf->u.legacy.stencil_tile_split;
1469
1470 for (level = 0; level < config->info.levels; level++) {
1471 r = gfx6_compute_level(addrlib, config, surf, true, level, compressed, &AddrSurfInfoIn,
1472 &AddrSurfInfoOut, &AddrDccIn, &AddrDccOut, NULL, NULL);
1473 if (r)
1474 return r;
1475
1476 /* DB uses the depth pitch for both stencil and depth. */
1477 if (!only_stencil) {
1478 if (surf->u.legacy.zs.stencil_level[level].nblk_x != surf->u.legacy.level[level].nblk_x)
1479 surf->u.legacy.stencil_adjusted = true;
1480 } else {
1481 surf->u.legacy.level[level].nblk_x = surf->u.legacy.zs.stencil_level[level].nblk_x;
1482 }
1483
1484 if (level == 0) {
1485 if (only_stencil) {
1486 r = gfx6_surface_settings(addrlib, info, config, &AddrSurfInfoOut, surf);
1487 if (r)
1488 return r;
1489 }
1490
1491 /* For 2D modes only. */
1492 if (AddrSurfInfoOut.tileMode >= ADDR_TM_2D_TILED_THIN1) {
1493 surf->u.legacy.stencil_tile_split = AddrSurfInfoOut.pTileInfo->tileSplitBytes;
1494 }
1495 }
1496 }
1497 }
1498
1499 /* Compute FMASK. */
1500 if (config->info.samples >= 2 && AddrSurfInfoIn.flags.color && info->has_graphics &&
1501 !(surf->flags & RADEON_SURF_NO_FMASK)) {
1502 ADDR_COMPUTE_FMASK_INFO_INPUT fin = {0};
1503 ADDR_COMPUTE_FMASK_INFO_OUTPUT fout = {0};
1504 ADDR_TILEINFO fmask_tile_info = {0};
1505
1506 fin.size = sizeof(fin);
1507 fout.size = sizeof(fout);
1508
1509 fin.tileMode = AddrSurfInfoOut.tileMode;
1510 fin.pitch = AddrSurfInfoOut.pitch;
1511 fin.height = config->info.height;
1512 fin.numSlices = AddrSurfInfoIn.numSlices;
1513 fin.numSamples = AddrSurfInfoIn.numSamples;
1514 fin.numFrags = AddrSurfInfoIn.numFrags;
1515 fin.tileIndex = -1;
1516 fout.pTileInfo = &fmask_tile_info;
1517
1518 r = AddrComputeFmaskInfo(addrlib, &fin, &fout);
1519 if (r)
1520 return r;
1521
1522 surf->fmask_size = fout.fmaskBytes;
1523 surf->fmask_alignment_log2 = util_logbase2(fout.baseAlign);
1524 surf->fmask_slice_size = fout.sliceSize;
1525 surf->fmask_tile_swizzle = 0;
1526
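 /* slice_tile_max is in units of 8x8-pixel tiles per slice; the hardware field
  * is a "max", i.e. count - 1, hence the decrement below.
  */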
1527 surf->u.legacy.color.fmask.slice_tile_max = (fout.pitch * fout.height) / 64;
1528 if (surf->u.legacy.color.fmask.slice_tile_max)
1529 surf->u.legacy.color.fmask.slice_tile_max -= 1;
1530
1531 surf->u.legacy.color.fmask.tiling_index = fout.tileIndex;
1532 surf->u.legacy.color.fmask.bankh = fout.pTileInfo->bankHeight;
1533 surf->u.legacy.color.fmask.pitch_in_pixels = fout.pitch;
1534
1535 /* Compute tile swizzle for FMASK. */
1536 if (config->info.fmask_surf_index && !(surf->flags & RADEON_SURF_SHAREABLE)) {
1537 ADDR_COMPUTE_BASE_SWIZZLE_INPUT xin = {0};
1538 ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT xout = {0};
1539
1540 xin.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_INPUT);
1541 xout.size = sizeof(ADDR_COMPUTE_BASE_SWIZZLE_OUTPUT);
1542
1543 /* This counter starts from 1 instead of 0. */
1544 xin.surfIndex = p_atomic_inc_return(config->info.fmask_surf_index);
1545 xin.tileIndex = fout.tileIndex;
1546 xin.macroModeIndex = fout.macroModeIndex;
1547 xin.pTileInfo = fout.pTileInfo;
1548 xin.tileMode = fin.tileMode;
1549
1550 int r = AddrComputeBaseSwizzle(addrlib, &xin, &xout);
1551 if (r != ADDR_OK)
1552 return r;
1553
1554 assert(xout.tileSwizzle <= u_bit_consecutive(0, sizeof(surf->tile_swizzle) * 8));
1555 surf->fmask_tile_swizzle = xout.tileSwizzle;
1556 }
1557 }
1558
1559 /* Recalculate the whole DCC miptree size including disabled levels.
1560 * This is what addrlib does, but calling addrlib would be a lot more
1561 * complicated.
1562 */
1563 if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER) && surf->meta_size && config->info.levels > 1) {
1564 /* The smallest miplevels that are never compressed by DCC
1565 * still read the DCC buffer from memory if the base level uses DCC,
1566 * and for some reason the DCC buffer needs to be larger if
1567 * the miptree uses non-zero tile_swizzle. Otherwise there are
1568 * VM faults.
1569 *
1570 * "dcc_alignment * 4" was determined by trial and error.
1571 */
1572 surf->meta_size = align64(surf->surf_size >> 8, (1ull << surf->meta_alignment_log2) * 4);
1573 }
1574
1575 /* Make sure HTILE covers the whole miptree, because the shader reads
1576 * TC-compatible HTILE even for levels where it's disabled by DB.
1577 */
1578 if (surf->flags & (RADEON_SURF_Z_OR_SBUFFER | RADEON_SURF_TC_COMPATIBLE_HTILE) &&
1579 surf->meta_size && config->info.levels > 1) {
1580 /* MSAA can't occur with levels > 1, so ignore the sample count. */
1581 const unsigned total_pixels = surf->surf_size / surf->bpe;
1582 const unsigned htile_block_size = 8 * 8;
1583 const unsigned htile_element_size = 4;
1584
1585 surf->meta_size = (total_pixels / htile_block_size) * htile_element_size;
1586 surf->meta_size = align(surf->meta_size, 1 << surf->meta_alignment_log2);
1587 } else if (surf->flags & RADEON_SURF_Z_OR_SBUFFER && !surf->meta_size) {
1588 /* Unset this if HTILE is not present. */
1589 surf->flags &= ~RADEON_SURF_TC_COMPATIBLE_HTILE;
1590 }
1591
1592 surf->is_linear = (only_stencil ? surf->u.legacy.zs.stencil_level[0].mode :
1593 surf->u.legacy.level[0].mode) == RADEON_SURF_MODE_LINEAR_ALIGNED;
1594
1595 surf->is_displayable = surf->is_linear || surf->micro_tile_mode == RADEON_MICRO_MODE_DISPLAY ||
1596 surf->micro_tile_mode == RADEON_MICRO_MODE_RENDER;
1597
1598 surf->thick_tiling = AddrSurfInfoOut.tileMode == ADDR_TM_1D_TILED_THICK ||
1599 AddrSurfInfoOut.tileMode == ADDR_TM_2D_TILED_THICK ||
1600 AddrSurfInfoOut.tileMode == ADDR_TM_2B_TILED_THICK ||
1601 AddrSurfInfoOut.tileMode == ADDR_TM_3D_TILED_THICK ||
1602 AddrSurfInfoOut.tileMode == ADDR_TM_3B_TILED_THICK ||
1603 AddrSurfInfoOut.tileMode == ADDR_TM_2D_TILED_XTHICK ||
1604 AddrSurfInfoOut.tileMode == ADDR_TM_3D_TILED_XTHICK ||
1605 AddrSurfInfoOut.tileMode == ADDR_TM_PRT_TILED_THICK ||
1606 AddrSurfInfoOut.tileMode == ADDR_TM_PRT_2D_TILED_THICK ||
1607 AddrSurfInfoOut.tileMode == ADDR_TM_PRT_3D_TILED_THICK ||
1608 /* Not thick per se, but these also benefit from the 3D access pattern
1609 * due to pipe rotation between slices.
1610 */
1611 AddrSurfInfoOut.tileMode == ADDR_TM_3D_TILED_THIN1 ||
1612 AddrSurfInfoOut.tileMode == ADDR_TM_PRT_3D_TILED_THIN1;
1613
1614 /* The rotated micro tile mode doesn't work if both CMASK and RB+ are
1615 * used at the same time. This case is not currently expected to occur
1616 * because we don't use the rotated mode. Enforce this restriction on all chips
1617 * to facilitate testing.
1618 */
1619 if (surf->micro_tile_mode == RADEON_MICRO_MODE_RENDER) {
1620 assert(!"rotate micro tile mode is unsupported");
1621 return ADDR_ERROR;
1622 }
1623
1624 ac_compute_cmask(info, config, surf);
1625 return 0;
1626 }
1627
1628 /* This is only called when expecting a tiled layout. */
1629 static int gfx9_get_preferred_swizzle_mode(ADDR_HANDLE addrlib, const struct radeon_info *info,
1630 struct radeon_surf *surf,
1631 ADDR2_COMPUTE_SURFACE_INFO_INPUT *in, bool is_fmask,
1632 AddrSwizzleMode *swizzle_mode)
1633 {
1634 ADDR_E_RETURNCODE ret;
1635 ADDR2_GET_PREFERRED_SURF_SETTING_INPUT sin = {0};
1636 ADDR2_GET_PREFERRED_SURF_SETTING_OUTPUT sout = {0};
1637
1638 sin.size = sizeof(ADDR2_GET_PREFERRED_SURF_SETTING_INPUT);
1639 sout.size = sizeof(ADDR2_GET_PREFERRED_SURF_SETTING_OUTPUT);
1640
1641 sin.flags = in->flags;
1642 sin.resourceType = in->resourceType;
1643 sin.format = in->format;
1644 sin.resourceLoction = ADDR_RSRC_LOC_INVIS;
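 /* Note: "resourceLoction" (sic) is the actual field name in the addrlib interface. */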
1645
1646 /* TODO: We could allow some of these: */
1647 sin.forbiddenBlock.micro = 1; /* don't allow the 256B swizzle modes */
1648
1649 if (info->gfx_level >= GFX11) {
1650 /* Disable 256K on APUs because it doesn't work with DAL. */
1651 if (!info->has_dedicated_vram) {
1652 sin.forbiddenBlock.gfx11.thin256KB = 1;
1653 sin.forbiddenBlock.gfx11.thick256KB = 1;
1654 }
1655 } else {
1656 sin.forbiddenBlock.var = 1; /* don't allow the variable-sized swizzle modes */
1657 }
1658
1659 sin.bpp = in->bpp;
1660 sin.width = in->width;
1661 sin.height = in->height;
1662 sin.numSlices = in->numSlices;
1663 sin.numMipLevels = in->numMipLevels;
1664 sin.numSamples = in->numSamples;
1665 sin.numFrags = in->numFrags;
1666
1667 if (is_fmask) {
1668 sin.flags.display = 0;
1669 sin.flags.color = 0;
1670 sin.flags.fmask = 1;
1671 }
1672
1673 /* With PRT images we want to force a 64 KiB block size so that the created
1674 * image is consistent with the format properties that Vulkan reports
1675 * independently of any particular image. */
1676 if (sin.flags.prt) {
1677 sin.forbiddenBlock.macroThin4KB = 1;
1678 sin.forbiddenBlock.macroThick4KB = 1;
1679 if (info->gfx_level >= GFX11) {
1680 sin.forbiddenBlock.gfx11.thin256KB = 1;
1681 sin.forbiddenBlock.gfx11.thick256KB = 1;
1682 }
1683 sin.forbiddenBlock.linear = 1;
1684 } else if (surf->flags & RADEON_SURF_PREFER_4K_ALIGNMENT) {
1685 sin.forbiddenBlock.macroThin64KB = 1;
1686 sin.forbiddenBlock.macroThick64KB = 1;
1687 }
1688
1689 if (surf->flags & (RADEON_SURF_PREFER_64K_ALIGNMENT | RADEON_SURF_PREFER_4K_ALIGNMENT)) {
1690 if (info->gfx_level >= GFX11) {
1691 sin.forbiddenBlock.gfx11.thin256KB = 1;
1692 sin.forbiddenBlock.gfx11.thick256KB = 1;
1693 }
1694 }
1695
1696 if (surf->flags & RADEON_SURF_FORCE_MICRO_TILE_MODE) {
1697 sin.forbiddenBlock.linear = 1;
1698
1699 if (surf->micro_tile_mode == RADEON_MICRO_MODE_DISPLAY)
1700 sin.preferredSwSet.sw_D = 1;
1701 else if (surf->micro_tile_mode == RADEON_MICRO_MODE_STANDARD)
1702 sin.preferredSwSet.sw_S = 1;
1703 else if (surf->micro_tile_mode == RADEON_MICRO_MODE_DEPTH)
1704 sin.preferredSwSet.sw_Z = 1;
1705 else if (surf->micro_tile_mode == RADEON_MICRO_MODE_RENDER)
1706 sin.preferredSwSet.sw_R = 1;
1707 }
1708
1709 if (info->gfx_level >= GFX10 && in->resourceType == ADDR_RSRC_TEX_3D && in->numSlices > 1) {
1710 /* 3D textures should use S swizzle modes for the best performance.
1711 * The only exception is 3D render targets, which prefer 64KB_D_X.
1712 *
1713 * 3D texture sampler performance with a very large 3D texture:
1714 * ADDR_SW_64KB_R_X = 19 FPS (DCC on), 26 FPS (DCC off)
1715 * ADDR_SW_64KB_Z_X = 25 FPS
1716 * ADDR_SW_64KB_D_X = 53 FPS
1717 * ADDR_SW_4KB_S = 53 FPS
1718 * ADDR_SW_64KB_S = 53 FPS
1719 * ADDR_SW_64KB_S_T = 61 FPS
1720 * ADDR_SW_4KB_S_X = 63 FPS
1721 * ADDR_SW_64KB_S_X = 62 FPS
1722 */
1723 sin.preferredSwSet.sw_S = 1;
1724 }
1725
1726 ret = Addr2GetPreferredSurfaceSetting(addrlib, &sin, &sout);
1727 if (ret != ADDR_OK)
1728 return ret;
1729
1730 *swizzle_mode = sout.swizzleMode;
1731 return 0;
1732 }
1733
1734 static bool is_dcc_supported_by_CB(const struct radeon_info *info, unsigned sw_mode)
1735 {
1736 switch (info->gfx_level) {
1737 case GFX9:
1738 return sw_mode != ADDR_SW_LINEAR;
1739
1740 case GFX10:
1741 case GFX10_3:
1742 return sw_mode == ADDR_SW_64KB_Z_X || sw_mode == ADDR_SW_64KB_R_X;
1743
1744 case GFX11:
1745 case GFX11_5:
1746 return sw_mode == ADDR_SW_64KB_Z_X || sw_mode == ADDR_SW_64KB_R_X ||
1747 sw_mode == ADDR_SW_256KB_Z_X || sw_mode == ADDR_SW_256KB_R_X;
1748
1749 default:
1750 unreachable("invalid gfx_level");
1751 }
1752 }
1753
1754 ASSERTED static bool is_dcc_supported_by_L2(const struct radeon_info *info,
1755 const struct radeon_surf *surf)
1756 {
1757 assert(info->gfx_level < GFX12);
1758
1759 bool single_indep = surf->u.gfx9.color.dcc.independent_64B_blocks !=
1760 surf->u.gfx9.color.dcc.independent_128B_blocks;
1761 bool valid_64b = surf->u.gfx9.color.dcc.independent_64B_blocks &&
1762 surf->u.gfx9.color.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_64B;
1763 bool valid_128b = surf->u.gfx9.color.dcc.independent_128B_blocks &&
1764 (surf->u.gfx9.color.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_128B ||
1765 (info->gfx_level >= GFX11_5 &&
1766 surf->u.gfx9.color.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_256B));
1767
1768 if (info->gfx_level <= GFX9) {
1769 /* Only independent 64B blocks are supported. */
1770 return single_indep && valid_64b;
1771 }
1772
1773 if (info->family == CHIP_NAVI10) {
1774 /* Only independent 128B blocks are supported. */
1775 return single_indep && valid_128b;
1776 }
1777
1778 if (info->family == CHIP_NAVI12 || info->family == CHIP_NAVI14) {
1779 /* Either 64B or 128B can be used, but the INDEPENDENT_*_BLOCKS setting must match.
1780 * If 64B is used, DCC image stores are unsupported.
1781 */
1782 return single_indep && (valid_64b || valid_128b);
1783 }
1784
1785 /* Valid settings are the same as NAVI14 + (64B && 128B && max_compressed_block_size == 64B) */
1786 return (single_indep && (valid_64b || valid_128b)) || valid_64b;
1787 }
1788
1789 static bool gfx10_DCN_requires_independent_64B_blocks(const struct radeon_info *info,
1790 const struct ac_surf_config *config)
1791 {
1792 assert(info->gfx_level >= GFX10);
1793
1794 /* Older kernels have buggy DAL. */
1795 if (info->drm_minor <= 43)
1796 return true;
1797
1798 /* For 4K, DCN requires INDEPENDENT_64B_BLOCKS = 1 and MAX_COMPRESSED_BLOCK_SIZE = 64B. */
1799 return config->info.width > 2560 || config->info.height > 2560;
1800 }
1801
1802 void ac_modifier_max_extent(const struct radeon_info *info,
1803 uint64_t modifier, uint32_t *width, uint32_t *height)
1804 {
1805 /* DCC is supported with any size. The maximum width per display pipe is 5760, but multiple
1806 * display pipes can be used to drive the display.
1807 */
1808 *width = 16384;
1809 *height = 16384;
1810
1811 if (info->gfx_level < GFX12 && ac_modifier_has_dcc(modifier)) {
1812 bool independent_64B_blocks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
1813
1814 if (info->gfx_level >= GFX10 && !independent_64B_blocks) {
1815 /* For 4K, DCN requires INDEPENDENT_64B_BLOCKS = 1 and MAX_COMPRESSED_BLOCK_SIZE = 64B. */
1816 *width = 2560;
1817 *height = 2560;
1818 }
1819 }
1820 }
1821
1822 static bool gfx9_is_dcc_supported_by_DCN(const struct radeon_info *info,
1823 const struct ac_surf_config *config,
1824 const struct radeon_surf *surf, bool rb_aligned,
1825 bool pipe_aligned)
1826 {
1827 if (!info->use_display_dcc_unaligned && !info->use_display_dcc_with_retile_blit)
1828 return false;
1829
1830 /* 16bpp and 64bpp are more complicated, so they are disallowed for now. */
1831 if (surf->bpe != 4)
1832 return false;
1833
1834 /* Handle unaligned DCC. */
1835 if (info->use_display_dcc_unaligned && (rb_aligned || pipe_aligned))
1836 return false;
1837
1838 switch (info->gfx_level) {
1839 case GFX9:
1840 /* There are more constraints, but we always set
1841 * INDEPENDENT_64B_BLOCKS = 1 and MAX_COMPRESSED_BLOCK_SIZE = 64B,
1842 * which always works.
1843 */
1844 assert(surf->u.gfx9.color.dcc.independent_64B_blocks &&
1845 surf->u.gfx9.color.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_64B);
1846 return true;
1847 case GFX10:
1848 case GFX10_3:
1849 case GFX11:
1850 case GFX11_5:
1851 /* DCN requires INDEPENDENT_128B_BLOCKS = 0 only on Navi1x. */
1852 if (info->gfx_level == GFX10 && surf->u.gfx9.color.dcc.independent_128B_blocks)
1853 return false;
1854
1855 return (!gfx10_DCN_requires_independent_64B_blocks(info, config) ||
1856 (surf->u.gfx9.color.dcc.independent_64B_blocks &&
1857 surf->u.gfx9.color.dcc.max_compressed_block_size == V_028C78_MAX_BLOCK_SIZE_64B));
1858 default:
1859 unreachable("unhandled chip");
1860 return false;
1861 }
1862 }
1863
1864 static void ac_copy_dcc_equation(const struct radeon_info *info,
1865 ADDR2_COMPUTE_DCCINFO_OUTPUT *dcc,
1866 struct gfx9_meta_equation *equation)
1867 {
1868 assert(info->gfx_level < GFX12);
1869
1870 equation->meta_block_width = dcc->metaBlkWidth;
1871 equation->meta_block_height = dcc->metaBlkHeight;
1872 equation->meta_block_depth = dcc->metaBlkDepth;
1873
1874 if (info->gfx_level >= GFX10) {
1875 /* gfx9_meta_equation doesn't store the first 4 and the last 8 elements. They must be 0. */
1876 for (unsigned i = 0; i < 4; i++)
1877 assert(dcc->equation.gfx10_bits[i] == 0);
1878
1879 for (unsigned i = ARRAY_SIZE(equation->u.gfx10_bits) + 4; i < 68; i++)
1880 assert(dcc->equation.gfx10_bits[i] == 0);
1881
1882 memcpy(equation->u.gfx10_bits, dcc->equation.gfx10_bits + 4,
1883 sizeof(equation->u.gfx10_bits));
1884 } else {
1885 assert(dcc->equation.gfx9.num_bits <= ARRAY_SIZE(equation->u.gfx9.bit));
1886
1887 equation->u.gfx9.num_bits = dcc->equation.gfx9.num_bits;
1888 equation->u.gfx9.num_pipe_bits = dcc->equation.gfx9.numPipeBits;
1889 for (unsigned b = 0; b < ARRAY_SIZE(equation->u.gfx9.bit); b++) {
1890 for (unsigned c = 0; c < ARRAY_SIZE(equation->u.gfx9.bit[b].coord); c++) {
1891 equation->u.gfx9.bit[b].coord[c].dim = dcc->equation.gfx9.bit[b].coord[c].dim;
1892 equation->u.gfx9.bit[b].coord[c].ord = dcc->equation.gfx9.bit[b].coord[c].ord;
1893 }
1894 }
1895 }
1896 }
1897
1898 static void ac_copy_cmask_equation(const struct radeon_info *info,
1899 ADDR2_COMPUTE_CMASK_INFO_OUTPUT *cmask,
1900 struct gfx9_meta_equation *equation)
1901 {
1902 assert(info->gfx_level < GFX11);
1903
1904 equation->meta_block_width = cmask->metaBlkWidth;
1905 equation->meta_block_height = cmask->metaBlkHeight;
1906 equation->meta_block_depth = 1;
1907
1908 if (info->gfx_level == GFX9) {
1909 assert(cmask->equation.gfx9.num_bits <= ARRAY_SIZE(equation->u.gfx9.bit));
1910
1911 equation->u.gfx9.num_bits = cmask->equation.gfx9.num_bits;
1912 equation->u.gfx9.num_pipe_bits = cmask->equation.gfx9.numPipeBits;
1913 for (unsigned b = 0; b < ARRAY_SIZE(equation->u.gfx9.bit); b++) {
1914 for (unsigned c = 0; c < ARRAY_SIZE(equation->u.gfx9.bit[b].coord); c++) {
1915 equation->u.gfx9.bit[b].coord[c].dim = cmask->equation.gfx9.bit[b].coord[c].dim;
1916 equation->u.gfx9.bit[b].coord[c].ord = cmask->equation.gfx9.bit[b].coord[c].ord;
1917 }
1918 }
1919 }
1920 }
1921
1922 static void ac_copy_htile_equation(const struct radeon_info *info,
1923 ADDR2_COMPUTE_HTILE_INFO_OUTPUT *htile,
1924 struct gfx9_meta_equation *equation)
1925 {
1926 assert(info->gfx_level < GFX12);
1927
1928 equation->meta_block_width = htile->metaBlkWidth;
1929 equation->meta_block_height = htile->metaBlkHeight;
1930
1931 /* gfx9_meta_equation doesn't store the first 8 and the last 4 elements. They must be 0. */
1932 for (unsigned i = 0; i < 8; i++)
1933 assert(htile->equation.gfx10_bits[i] == 0);
1934
1935 for (unsigned i = ARRAY_SIZE(equation->u.gfx10_bits) + 8; i < 72; i++)
1936 assert(htile->equation.gfx10_bits[i] == 0);
1937
1938 memcpy(equation->u.gfx10_bits, htile->equation.gfx10_bits + 8,
1939 sizeof(equation->u.gfx10_bits));
1940 }
1941
1942 static int gfx9_compute_miptree(struct ac_addrlib *addrlib, const struct radeon_info *info,
1943 const struct ac_surf_config *config, struct radeon_surf *surf,
1944 bool compressed, ADDR2_COMPUTE_SURFACE_INFO_INPUT *in)
1945 {
1946 ADDR2_MIP_INFO mip_info[RADEON_SURF_MAX_LEVELS] = {0};
1947 ADDR2_COMPUTE_SURFACE_INFO_OUTPUT out = {0};
1948 ADDR_E_RETURNCODE ret;
1949
1950 out.size = sizeof(ADDR2_COMPUTE_SURFACE_INFO_OUTPUT);
1951 out.pMipInfo = mip_info;
1952
1953 ret = Addr2ComputeSurfaceInfo(addrlib->handle, in, &out);
1954 if (ret != ADDR_OK)
1955 return ret;
1956
1957 if (in->flags.prt) {
1958 surf->prt_tile_width = out.blockWidth;
1959 surf->prt_tile_height = out.blockHeight;
1960 surf->prt_tile_depth = out.blockSlices;
1961
1962 surf->first_mip_tail_level = out.firstMipIdInTail;
1963
1964 for (unsigned i = 0; i < in->numMipLevels; i++) {
1965 surf->u.gfx9.prt_level_offset[i] = mip_info[i].macroBlockOffset + mip_info[i].mipTailOffset;
1966
1967 if (info->gfx_level >= GFX10)
1968 surf->u.gfx9.prt_level_pitch[i] = mip_info[i].pitch;
1969 else
1970 surf->u.gfx9.prt_level_pitch[i] = out.mipChainPitch;
1971 }
1972 }
1973
1974 surf->thick_tiling = out.blockSlices > 1; /* should be 0 for depth and stencil */
1975
1976 if (in->flags.stencil) {
1977 surf->u.gfx9.zs.stencil_swizzle_mode = in->swizzleMode;
1978 surf->u.gfx9.zs.stencil_epitch =
1979 out.epitchIsHeight ? out.mipChainHeight - 1 : out.mipChainPitch - 1;
1980 surf->surf_alignment_log2 = MAX2(surf->surf_alignment_log2, util_logbase2(out.baseAlign));
1981 surf->u.gfx9.zs.stencil_offset = align(surf->surf_size, out.baseAlign);
1982 surf->surf_size = surf->u.gfx9.zs.stencil_offset + out.surfSize;
1983 return 0;
1984 }
1985
1986 surf->u.gfx9.swizzle_mode = in->swizzleMode;
1987 surf->u.gfx9.epitch = out.epitchIsHeight ? out.mipChainHeight - 1 : out.mipChainPitch - 1;
1988
1989 /* CMASK fast clear uses these even if FMASK isn't allocated.
1990 * FMASK only supports the Z swizzle modes, whose numbers are multiples of 4.
1991 */
1992 if (!in->flags.depth) {
1993 surf->u.gfx9.color.fmask_swizzle_mode = surf->u.gfx9.swizzle_mode & ~0x3;
1994 surf->u.gfx9.color.fmask_epitch = surf->u.gfx9.epitch;
1995 }
1996
1997 surf->u.gfx9.surf_slice_size = out.sliceSize;
1998 surf->u.gfx9.surf_pitch = out.pitch;
1999 surf->u.gfx9.surf_height = out.height;
2000 surf->surf_size = out.surfSize;
2001 surf->surf_alignment_log2 = util_logbase2(out.baseAlign);
2002
2003 const int linear_alignment =
2004 util_next_power_of_two(LINEAR_PITCH_ALIGNMENT / surf->bpe);
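 /* LINEAR_PITCH_ALIGNMENT is in bytes; dividing by bpe gives the required pitch
  * alignment in elements, rounded up to a power of two for non-power-of-two bpe.
  */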
2005
2006 if (!compressed && surf->blk_w > 1 && out.pitch == out.pixelPitch &&
2007 surf->u.gfx9.swizzle_mode == ADDR_SW_LINEAR &&
2008 in->numMipLevels == 1) {
2009 /* Divide surf_pitch (= pitch in pixels) by blk_w to get a
2010 * pitch in elements instead because that's what the hardware needs
2011 * in resource descriptors.
2012 * See the comment in si_descriptors.c.
2013 */
2014 surf->u.gfx9.surf_pitch = align(surf->u.gfx9.surf_pitch / surf->blk_w,
2015 linear_alignment);
2016 surf->u.gfx9.epitch = surf->u.gfx9.surf_pitch - 1;
2017 /* Adjust surf_slice_size and surf_size to reflect the change made to surf_pitch. */
2018 surf->u.gfx9.surf_slice_size = (uint64_t)surf->u.gfx9.surf_pitch * out.height * surf->bpe;
2019 surf->surf_size = surf->u.gfx9.surf_slice_size * in->numSlices;
2020
2021 for (unsigned i = 0; i < in->numMipLevels; i++) {
2022 surf->u.gfx9.offset[i] = mip_info[i].offset;
2023 /* Adjust pitch like we did for surf_pitch */
2024 surf->u.gfx9.pitch[i] = align(mip_info[i].pitch / surf->blk_w,
2025 linear_alignment);
2026 }
2027 surf->u.gfx9.base_mip_width = surf->u.gfx9.surf_pitch;
2028 } else if (in->swizzleMode == ADDR_SW_LINEAR) {
2029 for (unsigned i = 0; i < in->numMipLevels; i++) {
2030 surf->u.gfx9.offset[i] = mip_info[i].offset;
2031 surf->u.gfx9.pitch[i] = mip_info[i].pitch;
2032 }
2033 surf->u.gfx9.base_mip_width = surf->u.gfx9.surf_pitch;
2034 } else {
2035 surf->u.gfx9.base_mip_width = mip_info[0].pitch;
2036 }
2037
2038 surf->u.gfx9.base_mip_height = mip_info[0].height;
2039
2040 if (in->flags.depth) {
2041 assert(in->swizzleMode != ADDR_SW_LINEAR);
2042
2043 if (surf->flags & RADEON_SURF_NO_HTILE)
2044 return 0;
2045
2046 /* HTILE */
2047 ADDR2_COMPUTE_HTILE_INFO_INPUT hin = {0};
2048 ADDR2_COMPUTE_HTILE_INFO_OUTPUT hout = {0};
2049 ADDR2_META_MIP_INFO meta_mip_info[RADEON_SURF_MAX_LEVELS] = {0};
2050
2051 hin.size = sizeof(ADDR2_COMPUTE_HTILE_INFO_INPUT);
2052 hout.size = sizeof(ADDR2_COMPUTE_HTILE_INFO_OUTPUT);
2053 hout.pMipInfo = meta_mip_info;
2054
2055 assert(in->flags.metaPipeUnaligned == 0);
2056 assert(in->flags.metaRbUnaligned == 0);
2057
2058 hin.hTileFlags.pipeAligned = 1;
2059 hin.hTileFlags.rbAligned = 1;
2060 hin.depthFlags = in->flags;
2061 hin.swizzleMode = in->swizzleMode;
2062 hin.unalignedWidth = in->width;
2063 hin.unalignedHeight = in->height;
2064 hin.numSlices = in->numSlices;
2065 hin.numMipLevels = in->numMipLevels;
2066 hin.firstMipIdInTail = out.firstMipIdInTail;
2067
2068 ret = Addr2ComputeHtileInfo(addrlib->handle, &hin, &hout);
2069 if (ret != ADDR_OK)
2070 return ret;
2071
2072 surf->meta_size = hout.htileBytes;
2073 surf->meta_slice_size = hout.sliceSize;
2074 surf->meta_alignment_log2 = util_logbase2(hout.baseAlign);
2075 surf->meta_pitch = hout.pitch;
2076 surf->num_meta_levels = in->numMipLevels;
2077
2078 for (unsigned i = 0; i < in->numMipLevels; i++) {
2079 surf->u.gfx9.meta_levels[i].offset = meta_mip_info[i].offset;
2080 surf->u.gfx9.meta_levels[i].size = meta_mip_info[i].sliceSize;
2081
2082 if (meta_mip_info[i].inMiptail) {
2083 /* GFX10 can only compress the first level
2084 * in the mip tail.
2085 */
2086 surf->num_meta_levels = i + 1;
2087 break;
2088 }
2089 }
2090
2091 if (!surf->num_meta_levels)
2092 surf->meta_size = 0;
2093
2094 if (info->gfx_level >= GFX10)
2095 ac_copy_htile_equation(info, &hout, &surf->u.gfx9.zs.htile_equation);
2096 return 0;
2097 }
2098
2099 {
2100 /* Compute tile swizzle for the color surface.
2101 * All *_X and *_T modes can use the swizzle.
2102 */
2103 if (config->info.surf_index && in->swizzleMode >= ADDR_SW_64KB_Z_T && !out.mipChainInTail &&
2104 !(surf->flags & RADEON_SURF_SHAREABLE) && !in->flags.display) {
2105 ADDR2_COMPUTE_PIPEBANKXOR_INPUT xin = {0};
2106 ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT xout = {0};
2107
2108 xin.size = sizeof(ADDR2_COMPUTE_PIPEBANKXOR_INPUT);
2109 xout.size = sizeof(ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT);
2110
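 /* p_atomic_inc_return returns the post-increment value, so subtract 1 to get a
  * 0-based surface index (unlike the FMASK counter further below, which stays 1-based).
  */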
2111 xin.surfIndex = p_atomic_inc_return(config->info.surf_index) - 1;
2112 xin.flags = in->flags;
2113 xin.swizzleMode = in->swizzleMode;
2114 xin.resourceType = in->resourceType;
2115 xin.format = in->format;
2116 xin.numSamples = in->numSamples;
2117 xin.numFrags = in->numFrags;
2118
2119 ret = Addr2ComputePipeBankXor(addrlib->handle, &xin, &xout);
2120 if (ret != ADDR_OK)
2121 return ret;
2122
2123 assert(xout.pipeBankXor <= u_bit_consecutive(0, sizeof(surf->tile_swizzle) * 8));
2124 surf->tile_swizzle = xout.pipeBankXor;
2125
2126 /* Gfx11 needs the pipe/bank xor shifted by 10 bits instead of 8. Drivers already
2127 * shift it by 8 bits, so apply the remaining 2-bit shift here.
2128 */
2129 if (info->gfx_level >= GFX11)
2130 surf->tile_swizzle <<= 2;
2131 }
2132
2133 bool use_dcc = false;
2134 if (surf->modifier != DRM_FORMAT_MOD_INVALID) {
2135 use_dcc = ac_modifier_has_dcc(surf->modifier);
2136 } else {
2137 use_dcc = info->has_graphics && !(surf->flags & RADEON_SURF_DISABLE_DCC) && !compressed &&
2138 !config->is_3d &&
2139 is_dcc_supported_by_CB(info, in->swizzleMode) &&
2140 (!in->flags.display ||
2141 gfx9_is_dcc_supported_by_DCN(info, config, surf, !in->flags.metaRbUnaligned,
2142 !in->flags.metaPipeUnaligned));
2143 }
2144
2145 /* DCC */
2146 if (use_dcc) {
2147 ADDR2_COMPUTE_DCCINFO_INPUT din = {0};
2148 ADDR2_COMPUTE_DCCINFO_OUTPUT dout = {0};
2149 ADDR2_META_MIP_INFO meta_mip_info[RADEON_SURF_MAX_LEVELS] = {0};
2150
2151 din.size = sizeof(ADDR2_COMPUTE_DCCINFO_INPUT);
2152 dout.size = sizeof(ADDR2_COMPUTE_DCCINFO_OUTPUT);
2153 dout.pMipInfo = meta_mip_info;
2154
2155 din.dccKeyFlags.pipeAligned = !in->flags.metaPipeUnaligned;
2156 din.dccKeyFlags.rbAligned = !in->flags.metaRbUnaligned;
2157 din.resourceType = in->resourceType;
2158 din.swizzleMode = in->swizzleMode;
2159 din.bpp = in->bpp;
2160 din.unalignedWidth = in->width;
2161 din.unalignedHeight = in->height;
2162 din.numSlices = in->numSlices;
2163 din.numFrags = in->numFrags;
2164 din.numMipLevels = in->numMipLevels;
2165 din.dataSurfaceSize = out.surfSize;
2166 din.firstMipIdInTail = out.firstMipIdInTail;
2167
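 /* The gfx9 DCC/CMASK info queries in addrlib are serialized with a mutex;
  * they are presumably not reentrant on that generation (later gfx levels
  * don't take the lock).
  */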
2168 if (info->gfx_level == GFX9)
2169 simple_mtx_lock(&addrlib->lock);
2170 ret = Addr2ComputeDccInfo(addrlib->handle, &din, &dout);
2171 if (info->gfx_level == GFX9)
2172 simple_mtx_unlock(&addrlib->lock);
2173
2174 if (ret != ADDR_OK)
2175 return ret;
2176
2177 surf->u.gfx9.color.dcc.rb_aligned = din.dccKeyFlags.rbAligned;
2178 surf->u.gfx9.color.dcc.pipe_aligned = din.dccKeyFlags.pipeAligned;
2179 surf->u.gfx9.color.dcc_block_width = dout.compressBlkWidth;
2180 surf->u.gfx9.color.dcc_block_height = dout.compressBlkHeight;
2181 surf->u.gfx9.color.dcc_block_depth = dout.compressBlkDepth;
2182 surf->u.gfx9.color.dcc_pitch_max = dout.pitch - 1;
2183 surf->u.gfx9.color.dcc_height = dout.height;
2184 surf->meta_size = dout.dccRamSize;
2185 surf->meta_slice_size = dout.dccRamSliceSize;
2186 surf->meta_alignment_log2 = util_logbase2(dout.dccRamBaseAlign);
2187 surf->num_meta_levels = in->numMipLevels;
2188
2189 /* Disable DCC for levels that are in the mip tail.
2190 *
2191 * There are two issues that this is intended to
2192 * address:
2193 *
2194 * 1. Multiple mip levels may share a cache line. This
2195 * can lead to corruption when switching between
2196 * rendering to different mip levels because the
2197 * RBs don't maintain coherency.
2198 *
2199 * 2. Texturing with metadata after rendering sometimes
2200 * fails with corruption, probably for a similar
2201 * reason.
2202 *
2203 * Working around these issues for all levels in the
2204 * mip tail may be overly conservative, but it's what
2205 * Vulkan does.
2206 *
2207 * Alternative solutions that also work but are worse:
2208 * - Disable DCC entirely.
2209 * - Flush the L2 cache after rendering.
2210 */
2211 for (unsigned i = 0; i < in->numMipLevels; i++) {
2212 surf->u.gfx9.meta_levels[i].offset = meta_mip_info[i].offset;
2213 surf->u.gfx9.meta_levels[i].size = meta_mip_info[i].sliceSize;
2214
2215 if (meta_mip_info[i].inMiptail) {
2216 /* GFX10 can only compress the first level
2217 * in the mip tail.
2218 *
2219 * TODO: Try to do the same thing for gfx9
2220 * if there are no regressions.
2221 */
2222 if (info->gfx_level >= GFX10)
2223 surf->num_meta_levels = i + 1;
2224 else
2225 surf->num_meta_levels = i;
2226 break;
2227 }
2228 }
2229
2230 if (!surf->num_meta_levels)
2231 surf->meta_size = 0;
2232
2233 surf->u.gfx9.color.display_dcc_size = surf->meta_size;
2234 surf->u.gfx9.color.display_dcc_alignment_log2 = surf->meta_alignment_log2;
2235 surf->u.gfx9.color.display_dcc_pitch_max = surf->u.gfx9.color.dcc_pitch_max;
2236 surf->u.gfx9.color.display_dcc_height = surf->u.gfx9.color.dcc_height;
2237
2238 if (in->resourceType == ADDR_RSRC_TEX_2D)
2239 ac_copy_dcc_equation(info, &dout, &surf->u.gfx9.color.dcc_equation);
2240
2241 /* Compute displayable DCC. */
2242 if (((in->flags.display && info->use_display_dcc_with_retile_blit) ||
2243 ac_modifier_has_dcc_retile(surf->modifier)) && surf->num_meta_levels) {
2244 /* Compute displayable DCC info. */
2245 din.dccKeyFlags.pipeAligned = 0;
2246 din.dccKeyFlags.rbAligned = 0;
2247
2248 assert(din.numSlices == 1);
2249 assert(din.numMipLevels == 1);
2250 assert(din.numFrags == 1);
2251 assert(surf->tile_swizzle == 0);
2252 assert(surf->u.gfx9.color.dcc.pipe_aligned || surf->u.gfx9.color.dcc.rb_aligned);
2253
2254 if (info->gfx_level == GFX9)
2255 simple_mtx_lock(&addrlib->lock);
2256 ret = Addr2ComputeDccInfo(addrlib->handle, &din, &dout);
2257 if (info->gfx_level == GFX9)
2258 simple_mtx_unlock(&addrlib->lock);
2259
2260 if (ret != ADDR_OK)
2261 return ret;
2262
2263 surf->u.gfx9.color.display_dcc_size = dout.dccRamSize;
2264 surf->u.gfx9.color.display_dcc_alignment_log2 = util_logbase2(dout.dccRamBaseAlign);
2265 surf->u.gfx9.color.display_dcc_pitch_max = dout.pitch - 1;
2266 surf->u.gfx9.color.display_dcc_height = dout.height;
2267 assert(surf->u.gfx9.color.display_dcc_size <= surf->meta_size);
2268
2269 ac_copy_dcc_equation(info, &dout, &surf->u.gfx9.color.display_dcc_equation);
2270 surf->u.gfx9.color.dcc.display_equation_valid = true;
2271 }
2272 }
2273
2274 /* FMASK (it doesn't exist on GFX11) */
2275 if (info->gfx_level <= GFX10_3 && info->has_graphics &&
2276 in->numSamples > 1 && !(surf->flags & RADEON_SURF_NO_FMASK)) {
2277 ADDR2_COMPUTE_FMASK_INFO_INPUT fin = {0};
2278 ADDR2_COMPUTE_FMASK_INFO_OUTPUT fout = {0};
2279
2280 fin.size = sizeof(ADDR2_COMPUTE_FMASK_INFO_INPUT);
2281 fout.size = sizeof(ADDR2_COMPUTE_FMASK_INFO_OUTPUT);
2282
2283 ret = gfx9_get_preferred_swizzle_mode(addrlib->handle, info, surf, in, true, &fin.swizzleMode);
2284 if (ret != ADDR_OK)
2285 return ret;
2286
2287 fin.unalignedWidth = in->width;
2288 fin.unalignedHeight = in->height;
2289 fin.numSlices = in->numSlices;
2290 fin.numSamples = in->numSamples;
2291 fin.numFrags = in->numFrags;
2292
2293 ret = Addr2ComputeFmaskInfo(addrlib->handle, &fin, &fout);
2294 if (ret != ADDR_OK)
2295 return ret;
2296
2297 surf->u.gfx9.color.fmask_swizzle_mode = fin.swizzleMode;
2298 surf->u.gfx9.color.fmask_epitch = fout.pitch - 1;
2299 surf->fmask_size = fout.fmaskBytes;
2300 surf->fmask_alignment_log2 = util_logbase2(fout.baseAlign);
2301 surf->fmask_slice_size = fout.sliceSize;
2302
2303 /* Compute tile swizzle for the FMASK surface. */
2304 if (config->info.fmask_surf_index && fin.swizzleMode >= ADDR_SW_64KB_Z_T &&
2305 !(surf->flags & RADEON_SURF_SHAREABLE)) {
2306 ADDR2_COMPUTE_PIPEBANKXOR_INPUT xin = {0};
2307 ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT xout = {0};
2308
2309 xin.size = sizeof(ADDR2_COMPUTE_PIPEBANKXOR_INPUT);
2310 xout.size = sizeof(ADDR2_COMPUTE_PIPEBANKXOR_OUTPUT);
2311
2312 /* This counter starts from 1 instead of 0. */
2313 xin.surfIndex = p_atomic_inc_return(config->info.fmask_surf_index);
2314 xin.flags = in->flags;
2315 xin.swizzleMode = fin.swizzleMode;
2316 xin.resourceType = in->resourceType;
2317 xin.format = in->format;
2318 xin.numSamples = in->numSamples;
2319 xin.numFrags = in->numFrags;
2320
2321 ret = Addr2ComputePipeBankXor(addrlib->handle, &xin, &xout);
2322 if (ret != ADDR_OK)
2323 return ret;
2324
2325 assert(xout.pipeBankXor <= u_bit_consecutive(0, sizeof(surf->fmask_tile_swizzle) * 8));
2326 surf->fmask_tile_swizzle = xout.pipeBankXor;
2327 }
2328 }
2329
2330 /* CMASK -- on GFX10 only for FMASK (and it doesn't exist on GFX11) */
2331 if (info->gfx_level <= GFX10_3 && info->has_graphics &&
2332 in->swizzleMode != ADDR_SW_LINEAR && in->resourceType == ADDR_RSRC_TEX_2D &&
2333 ((info->gfx_level <= GFX9 && in->numSamples == 1 && in->flags.metaPipeUnaligned == 0 &&
2334 in->flags.metaRbUnaligned == 0) ||
2335 (surf->fmask_size && in->numSamples >= 2))) {
2336 ADDR2_COMPUTE_CMASK_INFO_INPUT cin = {0};
2337 ADDR2_COMPUTE_CMASK_INFO_OUTPUT cout = {0};
2338 ADDR2_META_MIP_INFO meta_mip_info[RADEON_SURF_MAX_LEVELS] = {0};
2339
2340 cin.size = sizeof(ADDR2_COMPUTE_CMASK_INFO_INPUT);
2341 cout.size = sizeof(ADDR2_COMPUTE_CMASK_INFO_OUTPUT);
2342 cout.pMipInfo = meta_mip_info;
2343
2344 assert(in->flags.metaPipeUnaligned == 0);
2345 assert(in->flags.metaRbUnaligned == 0);
2346
2347 cin.cMaskFlags.pipeAligned = 1;
2348 cin.cMaskFlags.rbAligned = 1;
2349 cin.resourceType = in->resourceType;
2350 cin.unalignedWidth = in->width;
2351 cin.unalignedHeight = in->height;
2352 cin.numSlices = in->numSlices;
2353 cin.numMipLevels = in->numMipLevels;
2354 cin.firstMipIdInTail = out.firstMipIdInTail;
2355
2356 if (in->numSamples > 1)
2357 cin.swizzleMode = surf->u.gfx9.color.fmask_swizzle_mode;
2358 else
2359 cin.swizzleMode = in->swizzleMode;
2360
2361 if (info->gfx_level == GFX9)
2362 simple_mtx_lock(&addrlib->lock);
2363 ret = Addr2ComputeCmaskInfo(addrlib->handle, &cin, &cout);
2364 if (info->gfx_level == GFX9)
2365 simple_mtx_unlock(&addrlib->lock);
2366
2367 if (ret != ADDR_OK)
2368 return ret;
2369
2370 surf->cmask_size = cout.cmaskBytes;
2371 surf->cmask_alignment_log2 = util_logbase2(cout.baseAlign);
2372 surf->cmask_slice_size = cout.sliceSize;
2373 surf->cmask_pitch = cout.pitch;
2374 surf->cmask_height = cout.height;
2375 surf->u.gfx9.color.cmask_level0.offset = meta_mip_info[0].offset;
2376 surf->u.gfx9.color.cmask_level0.size = meta_mip_info[0].sliceSize;
2377
2378 ac_copy_cmask_equation(info, &cout, &surf->u.gfx9.color.cmask_equation);
2379 }
2380 }
2381
2382 return 0;
2383 }
2384
2385 static int gfx9_compute_surface(struct ac_addrlib *addrlib, const struct radeon_info *info,
2386 const struct ac_surf_config *config, enum radeon_surf_mode mode,
2387 struct radeon_surf *surf)
2388 {
2389 bool compressed;
2390 ADDR2_COMPUTE_SURFACE_INFO_INPUT AddrSurfInfoIn = {0};
2391 int r;
2392
2393 AddrSurfInfoIn.size = sizeof(ADDR2_COMPUTE_SURFACE_INFO_INPUT);
2394
2395 compressed = surf->blk_w == 4 && surf->blk_h == 4;
2396
2397 AddrSurfInfoIn.format = bpe_to_format(surf);
2398 if (!compressed)
2399 AddrSurfInfoIn.bpp = surf->bpe * 8;
2400
2401 bool is_color_surface = !(surf->flags & RADEON_SURF_Z_OR_SBUFFER);
2402 AddrSurfInfoIn.flags.color = is_color_surface && !(surf->flags & RADEON_SURF_NO_RENDER_TARGET);
2403 AddrSurfInfoIn.flags.depth = (surf->flags & RADEON_SURF_ZBUFFER) != 0;
2404 AddrSurfInfoIn.flags.display = get_display_flag(config, surf);
2405 /* flags.texture currently refers to TC-compatible HTILE */
2406 AddrSurfInfoIn.flags.texture = (is_color_surface && !(surf->flags & RADEON_SURF_NO_TEXTURE)) ||
2407 (surf->flags & RADEON_SURF_TC_COMPATIBLE_HTILE);
2408 AddrSurfInfoIn.flags.opt4space = 1;
2409 AddrSurfInfoIn.flags.prt = (surf->flags & RADEON_SURF_PRT) != 0;
2410
2411 AddrSurfInfoIn.numMipLevels = config->info.levels;
2412 AddrSurfInfoIn.numSamples = MAX2(1, config->info.samples);
2413 AddrSurfInfoIn.numFrags = AddrSurfInfoIn.numSamples;
2414
2415 if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER))
2416 AddrSurfInfoIn.numFrags = MAX2(1, config->info.storage_samples);
2417
2418 /* GFX9 doesn't support 1D depth textures, so allocate all 1D textures
2419 * as 2D there to avoid having 1D vs 2D shader variants; shaders must then
2420 * sample these 1D textures as 2D. */
2421 if (config->is_3d)
2422 AddrSurfInfoIn.resourceType = ADDR_RSRC_TEX_3D;
2423 else if (info->gfx_level != GFX9 && config->is_1d)
2424 AddrSurfInfoIn.resourceType = ADDR_RSRC_TEX_1D;
2425 else
2426 AddrSurfInfoIn.resourceType = ADDR_RSRC_TEX_2D;
2427
2428 AddrSurfInfoIn.width = config->info.width;
2429 AddrSurfInfoIn.height = config->info.height;
2430
2431 if (config->is_3d)
2432 AddrSurfInfoIn.numSlices = config->info.depth;
2433 else if (config->is_cube)
2434 AddrSurfInfoIn.numSlices = 6;
2435 else
2436 AddrSurfInfoIn.numSlices = config->info.array_size;
2437
2438 /* This is propagated to DCC. It must be 0 for HTILE and CMASK. */
2439 AddrSurfInfoIn.flags.metaPipeUnaligned = 0;
2440 AddrSurfInfoIn.flags.metaRbUnaligned = 0;
2441
2442 if (ac_modifier_has_dcc(surf->modifier)) {
2443 ac_modifier_fill_dcc_params(surf->modifier, surf, &AddrSurfInfoIn);
2444 } else if (!AddrSurfInfoIn.flags.depth && !AddrSurfInfoIn.flags.stencil) {
2445 /* Optimal values for the L2 cache. */
2446 /* Don't change the DCC settings for imported buffers - they might differ. */
2447 if (!(surf->flags & RADEON_SURF_IMPORTED)) {
2448 if (info->gfx_level >= GFX11_5) {
2449 surf->u.gfx9.color.dcc.independent_64B_blocks = 0;
2450 surf->u.gfx9.color.dcc.independent_128B_blocks = 1;
2451 surf->u.gfx9.color.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_256B;
2452 } else if (info->gfx_level >= GFX10) {
2453 surf->u.gfx9.color.dcc.independent_64B_blocks = 0;
2454 surf->u.gfx9.color.dcc.independent_128B_blocks = 1;
2455 surf->u.gfx9.color.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_128B;
2456 } else if (info->gfx_level == GFX9) {
2457 surf->u.gfx9.color.dcc.independent_64B_blocks = 1;
2458 surf->u.gfx9.color.dcc.independent_128B_blocks = 0;
2459 surf->u.gfx9.color.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
2460 }
2461 }
2462
2463 if (AddrSurfInfoIn.flags.display) {
2464 /* The display hardware can only read DCC with RB_ALIGNED=0 and
2465 * PIPE_ALIGNED=0. PIPE_ALIGNED really means L2CACHE_ALIGNED.
2466 *
2467 * The CB block requires RB_ALIGNED=1 except on chips with a single RB.
2468 * PIPE_ALIGNED is optional, but PIPE_ALIGNED=0 requires L2 flushes
2469 * after rendering, so PIPE_ALIGNED=1 is recommended.
2470 */
2471 if (info->use_display_dcc_unaligned) {
2472 AddrSurfInfoIn.flags.metaPipeUnaligned = 1;
2473 AddrSurfInfoIn.flags.metaRbUnaligned = 1;
2474 }
2475
2476 /* Adjust DCC settings to meet DCN requirements. */
2477 /* Don't change the DCC settings for imported buffers - they might differ. */
2478 if (!(surf->flags & RADEON_SURF_IMPORTED) &&
2479 (info->use_display_dcc_unaligned || info->use_display_dcc_with_retile_blit)) {
2480 /* Only Navi12/14 support independent 64B blocks in L2,
2481 * but without DCC image stores.
2482 */
2483 if (info->family == CHIP_NAVI12 || info->family == CHIP_NAVI14) {
2484 surf->u.gfx9.color.dcc.independent_64B_blocks = 1;
2485 surf->u.gfx9.color.dcc.independent_128B_blocks = 0;
2486 surf->u.gfx9.color.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
2487 }
2488
2489 if ((info->gfx_level >= GFX10_3 && info->family <= CHIP_REMBRANDT) ||
2490 /* Newer chips will skip this when possible to get better performance.
2491 * This is also possible for other gfx10.3 chips, but is disabled for
2492 * interoperability between different Mesa versions.
2493 */
2494 (info->family > CHIP_REMBRANDT &&
2495 gfx10_DCN_requires_independent_64B_blocks(info, config))) {
2496 surf->u.gfx9.color.dcc.independent_64B_blocks = 1;
2497 surf->u.gfx9.color.dcc.independent_128B_blocks = 1;
2498 surf->u.gfx9.color.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
2499 }
2500 }
2501 }
2502 }
2503
2504 if (surf->modifier == DRM_FORMAT_MOD_INVALID) {
2505 switch (mode) {
2506 case RADEON_SURF_MODE_LINEAR_ALIGNED:
2507 assert(config->info.samples <= 1);
2508 assert(!(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
2509 AddrSurfInfoIn.swizzleMode = ADDR_SW_LINEAR;
2510 break;
2511
2512 case RADEON_SURF_MODE_1D:
2513 case RADEON_SURF_MODE_2D:
2514 if (surf->flags & RADEON_SURF_IMPORTED ||
2515 (info->gfx_level >= GFX10 && surf->flags & RADEON_SURF_FORCE_SWIZZLE_MODE)) {
2516 AddrSurfInfoIn.swizzleMode = surf->u.gfx9.swizzle_mode;
2517 break;
2518 }
2519
2520 /* On GFX11, the only allowed swizzle mode for VRS rate images is
2521 * 64KB_R_X.
2522 */
2523 if (info->gfx_level >= GFX11 && surf->flags & RADEON_SURF_VRS_RATE) {
2524 AddrSurfInfoIn.swizzleMode = ADDR_SW_64KB_R_X;
2525 break;
2526 }
2527
2528 /* VCN only supports 256B_D. */
2529 if (surf->flags & RADEON_SURF_VIDEO_REFERENCE) {
2530 AddrSurfInfoIn.swizzleMode = ADDR_SW_256B_D;
2531 break;
2532 }
2533
2534 r = gfx9_get_preferred_swizzle_mode(addrlib->handle, info, surf, &AddrSurfInfoIn, false,
2535 &AddrSurfInfoIn.swizzleMode);
2536 if (r)
2537 return r;
2538 break;
2539
2540 default:
2541 assert(0);
2542 }
2543 } else {
2544 /* We have a valid and required modifier here. */
2545
2546 assert(!compressed);
2547 assert(!ac_modifier_has_dcc(surf->modifier) ||
2548 !(surf->flags & RADEON_SURF_DISABLE_DCC));
2549
2550 AddrSurfInfoIn.swizzleMode = ac_get_modifier_swizzle_mode(info->gfx_level, surf->modifier);
2551 }
2552
2553 surf->u.gfx9.resource_type = (enum gfx9_resource_type)AddrSurfInfoIn.resourceType;
2554 surf->has_stencil = !!(surf->flags & RADEON_SURF_SBUFFER);
2555
2556 surf->num_meta_levels = 0;
2557 surf->surf_size = 0;
2558 surf->fmask_size = 0;
2559 surf->meta_size = 0;
2560 surf->meta_slice_size = 0;
2561 surf->u.gfx9.surf_offset = 0;
2562 if (AddrSurfInfoIn.flags.stencil)
2563 surf->u.gfx9.zs.stencil_offset = 0;
2564 surf->cmask_size = 0;
2565
2566 const bool only_stencil =
2567 (surf->flags & RADEON_SURF_SBUFFER) && !(surf->flags & RADEON_SURF_ZBUFFER);
2568
2569 /* Calculate texture layout information. */
2570 if (!only_stencil) {
2571 r = gfx9_compute_miptree(addrlib, info, config, surf, compressed, &AddrSurfInfoIn);
2572 if (r)
2573 return r;
2574 }
2575
2576 /* Calculate texture layout information for stencil. */
2577 if (surf->flags & RADEON_SURF_SBUFFER) {
2578 AddrSurfInfoIn.flags.stencil = 1;
2579 AddrSurfInfoIn.bpp = 8;
2580 AddrSurfInfoIn.format = ADDR_FMT_8;
2581
2582 if (!AddrSurfInfoIn.flags.depth) {
2583 r = gfx9_get_preferred_swizzle_mode(addrlib->handle, info, surf, &AddrSurfInfoIn, false,
2584 &AddrSurfInfoIn.swizzleMode);
2585 if (r)
2586 return r;
2587 } else
2588 AddrSurfInfoIn.flags.depth = 0;
2589
2590 r = gfx9_compute_miptree(addrlib, info, config, surf, compressed, &AddrSurfInfoIn);
2591 if (r)
2592 return r;
2593 }
2594
2595 surf->is_linear = (only_stencil ? surf->u.gfx9.zs.stencil_swizzle_mode :
2596 surf->u.gfx9.swizzle_mode) == ADDR_SW_LINEAR;
2597
2598 /* Query whether the surface is displayable. */
2599 /* This is only useful for surfaces that are allocated without SCANOUT. */
2600 BOOL_32 displayable = false;
2601 if (!config->is_3d && !config->is_cube) {
2602 r = Addr2IsValidDisplaySwizzleMode(addrlib->handle, surf->u.gfx9.swizzle_mode,
2603 surf->bpe * 8, &displayable);
2604 if (r)
2605 return r;
2606
2607 /* Display needs unaligned DCC. */
2608 if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER) &&
2609 surf->num_meta_levels &&
2610 (!gfx9_is_dcc_supported_by_DCN(info, config, surf, surf->u.gfx9.color.dcc.rb_aligned,
2611 surf->u.gfx9.color.dcc.pipe_aligned) ||
2612 /* Don't set is_displayable if displayable DCC is missing. */
2613 (info->use_display_dcc_with_retile_blit && !surf->u.gfx9.color.dcc.display_equation_valid)))
2614 displayable = false;
2615 }
2616 surf->is_displayable = displayable;
2617
2618 /* Validate that we allocated a displayable surface if requested. */
2619 assert(!AddrSurfInfoIn.flags.display || surf->is_displayable);
2620
2621 /* Validate that DCC is set up correctly. */
2622 if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER) && surf->num_meta_levels) {
2623 assert(is_dcc_supported_by_L2(info, surf));
2624 if (AddrSurfInfoIn.flags.color)
2625 assert(is_dcc_supported_by_CB(info, surf->u.gfx9.swizzle_mode));
2626 if (AddrSurfInfoIn.flags.display && surf->modifier == DRM_FORMAT_MOD_INVALID) {
2627 assert(gfx9_is_dcc_supported_by_DCN(info, config, surf, surf->u.gfx9.color.dcc.rb_aligned,
2628 surf->u.gfx9.color.dcc.pipe_aligned));
2629 }
2630 }
2631
2632 if (info->has_graphics && !compressed && !config->is_3d && config->info.levels == 1 &&
2633 AddrSurfInfoIn.flags.color && !surf->is_linear &&
2634 (1 << surf->surf_alignment_log2) >= 64 * 1024 && /* 64KB tiling */
2635 !(surf->flags & (RADEON_SURF_DISABLE_DCC | RADEON_SURF_FORCE_SWIZZLE_MODE |
2636 RADEON_SURF_FORCE_MICRO_TILE_MODE)) &&
2637 surf->modifier == DRM_FORMAT_MOD_INVALID &&
2638 gfx9_is_dcc_supported_by_DCN(info, config, surf, surf->u.gfx9.color.dcc.rb_aligned,
2639 surf->u.gfx9.color.dcc.pipe_aligned)) {
2640 /* Validate that DCC is enabled if DCN can do it. */
2641 if ((info->use_display_dcc_unaligned || info->use_display_dcc_with_retile_blit) &&
2642 AddrSurfInfoIn.flags.display && surf->bpe == 4) {
2643 assert(surf->num_meta_levels);
2644 }
2645
2646 /* Validate that non-scanout DCC is always enabled. */
2647 if (!AddrSurfInfoIn.flags.display)
2648 assert(surf->num_meta_levels);
2649 }
2650
2651 if (!surf->meta_size) {
2652 /* Unset this if HTILE is not present. */
2653 surf->flags &= ~RADEON_SURF_TC_COMPATIBLE_HTILE;
2654 }
2655
2656 if (surf->modifier != DRM_FORMAT_MOD_INVALID) {
2657 assert((surf->num_meta_levels != 0) == ac_modifier_has_dcc(surf->modifier));
2658 }
2659
2660 switch (surf->u.gfx9.swizzle_mode) {
2661 /* S = standard. */
2662 case ADDR_SW_256B_S:
2663 case ADDR_SW_4KB_S:
2664 case ADDR_SW_64KB_S:
2665 case ADDR_SW_64KB_S_T:
2666 case ADDR_SW_4KB_S_X:
2667 case ADDR_SW_64KB_S_X:
2668 case ADDR_SW_256KB_S_X:
2669 surf->micro_tile_mode = RADEON_MICRO_MODE_STANDARD;
2670 break;
2671
2672 /* D = display. */
2673 case ADDR_SW_LINEAR:
2674 case ADDR_SW_256B_D:
2675 case ADDR_SW_4KB_D:
2676 case ADDR_SW_64KB_D:
2677 case ADDR_SW_64KB_D_T:
2678 case ADDR_SW_4KB_D_X:
2679 case ADDR_SW_64KB_D_X:
2680 case ADDR_SW_256KB_D_X:
2681 surf->micro_tile_mode = RADEON_MICRO_MODE_DISPLAY;
2682 break;
2683
2684 /* R = rotated (gfx9), render target (gfx10). */
2685 case ADDR_SW_256B_R:
2686 case ADDR_SW_4KB_R:
2687 case ADDR_SW_64KB_R:
2688 case ADDR_SW_64KB_R_T:
2689 case ADDR_SW_4KB_R_X:
2690 case ADDR_SW_64KB_R_X:
2691 case ADDR_SW_256KB_R_X:
2692 /* The rotated micro tile mode doesn't work if both CMASK and RB+ are
2693 * used at the same time. We currently do not use the rotated mode
2694 * on gfx9.
2695 */
2696 assert(info->gfx_level >= GFX10 || !"rotate micro tile mode is unsupported");
2697 surf->micro_tile_mode = RADEON_MICRO_MODE_RENDER;
2698 break;
2699
2700 /* Z = depth. */
2701 case ADDR_SW_4KB_Z:
2702 case ADDR_SW_64KB_Z:
2703 case ADDR_SW_64KB_Z_T:
2704 case ADDR_SW_4KB_Z_X:
2705 case ADDR_SW_64KB_Z_X:
2706 case ADDR_SW_256KB_Z_X:
2707 surf->micro_tile_mode = RADEON_MICRO_MODE_DEPTH;
2708 break;
2709
2710 default:
2711 assert(0);
2712 }
2713
2714 return 0;
2715 }
2716
2717 static uint64_t gfx12_estimate_size(const ADDR3_COMPUTE_SURFACE_INFO_INPUT *in,
2718 const struct radeon_surf *surf,
2719 unsigned align_width, unsigned align_height,
2720 unsigned align_depth)
2721 {
2722 unsigned blk_w = surf ? surf->blk_w : 1;
2723 unsigned blk_h = surf ? surf->blk_h : 1;
2724 unsigned bpe = in->bpp ? in->bpp / 8 : surf->bpe;
2725 unsigned width = align(in->width, align_width * blk_w);
2726 unsigned height = align(in->height, align_height * blk_h);
2727 unsigned depth = align(in->numSlices, align_depth);
2728 unsigned tile_size = align_width * align_height * align_depth *
2729 in->numSamples * bpe;
2730
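 /* With mipmaps and a tiled mode (align_height > 1), approximate the mip chain
  * layout by rounding the base level up to a power of two before summing levels.
  */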
2731 if (in->numMipLevels > 1 && align_height > 1) {
2732 width = util_next_power_of_two(width);
2733 height = util_next_power_of_two(height);
2734 }
2735
2736 uint64_t size = 0;
2737
2738 /* Note: This mipmap size computation is inaccurate. */
2739 for (unsigned i = 0; i < in->numMipLevels; i++) {
2740 uint64_t level_size =
2741 (uint64_t)DIV_ROUND_UP(width, blk_w) * DIV_ROUND_UP(height, blk_h) * depth *
2742 in->numSamples * bpe;
2743
2744 size += level_size;
2745
2746 if (tile_size >= 4096 && level_size <= tile_size / 2) {
2747 /* We are likely in the mip tail, return. */
2748 assert(size);
2749 return size;
2750 }
2751
2752 /* Minify the level. */
2753 width = u_minify(width, 1);
2754 height = u_minify(height, 1);
2755 if (in->resourceType == ADDR_RSRC_TEX_3D)
2756 depth = u_minify(depth, 1);
2757 }
2758
2759 /* TODO: check that this is not too different from the correct value */
2760 assert(size);
2761 return size;
2762 }
2763
2764 static unsigned gfx12_select_swizzle_mode(struct ac_addrlib *addrlib,
2765 const struct radeon_info *info,
2766 const struct radeon_surf *surf,
2767 const ADDR3_COMPUTE_SURFACE_INFO_INPUT *in)
2768 {
2769 ADDR3_GET_POSSIBLE_SWIZZLE_MODE_INPUT get_in = {0};
2770 ADDR3_GET_POSSIBLE_SWIZZLE_MODE_OUTPUT get_out = {0};
2771
2772 get_in.size = sizeof(ADDR3_GET_POSSIBLE_SWIZZLE_MODE_INPUT);
2773 get_out.size = sizeof(ADDR3_GET_POSSIBLE_SWIZZLE_MODE_OUTPUT);
2774
2775 get_in.flags = in->flags;
2776 get_in.resourceType = in->resourceType;
2777 get_in.bpp = in->bpp ? in->bpp : (surf->bpe * 8);
2778 get_in.width = in->width;
2779 get_in.height = in->height;
2780 get_in.numSlices = in->numSlices;
2781 get_in.numMipLevels = in->numMipLevels;
2782 get_in.numSamples = in->numSamples;
2783
2784 if (surf->flags & RADEON_SURF_PREFER_4K_ALIGNMENT) {
2785 get_in.maxAlign = 4 * 1024;
2786 } else if (surf->flags & RADEON_SURF_PREFER_64K_ALIGNMENT) {
2787 get_in.maxAlign = 64 * 1024;
2788 } else {
2789 get_in.maxAlign = info->has_dedicated_vram ? (256 * 1024) : (64 * 1024);
2790 }
2791
2792 if (Addr3GetPossibleSwizzleModes(addrlib->handle, &get_in, &get_out) != ADDR_OK) {
2793 assert(!"Addr3GetPossibleSwizzleModes failed");
2794 return ADDR3_MAX_TYPE;
2795 }
2796
2797 /* TODO: Workaround for SW_LINEAR assertion failures in addrlib. This should be fixed in addrlib. */
2798 if (surf && surf->blk_w == 4)
2799 get_out.validModes.swLinear = 0;
2800
2801 assert(get_out.validModes.value);
2802
2803 unsigned bpe = in->bpp ? in->bpp / 8 : surf->bpe;
2804 unsigned log_bpp = util_logbase2(bpe);
2805 unsigned log_samples = util_logbase2(in->numSamples);
2806 uint64_t ideal_size = gfx12_estimate_size(in, surf, 1, 1, 1);
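 /* ideal_size is the tightly packed size with no block alignment; the estimated
  * size for each candidate swizzle block size is compared against it below.
  */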
2807
2808 if (in->resourceType == ADDR_RSRC_TEX_3D) {
2809 static unsigned block3d_size_4K[5][3] = {
2810 {16, 16, 16},
2811 {8, 16, 16},
2812 {8, 16, 8},
2813 {8, 8, 8},
2814 {4, 8, 8},
2815 };
2816 static unsigned block3d_size_64K[5][3] = {
2817 {64, 32, 32},
2818 {32, 32, 32},
2819 {32, 32, 16},
2820 {32, 16, 16},
2821 {16, 16, 16},
2822 };
2823 static unsigned block3d_size_256K[5][3] = {
2824 {64, 64, 64},
2825 {32, 64, 64},
2826 {32, 64, 32},
2827 {32, 32, 32},
2828 {16, 32, 32},
2829 };
2830
2831 uint64_t size_4K = gfx12_estimate_size(in, surf, block3d_size_4K[log_bpp][0],
2832 block3d_size_4K[log_bpp][1],
2833 block3d_size_4K[log_bpp][2]);
2834
2835 uint64_t size_64K = gfx12_estimate_size(in, surf, block3d_size_64K[log_bpp][0],
2836 block3d_size_64K[log_bpp][1],
2837 block3d_size_64K[log_bpp][2]);
2838
2839 uint64_t size_256K = gfx12_estimate_size(in, surf, block3d_size_256K[log_bpp][0],
2840 block3d_size_256K[log_bpp][1],
2841 block3d_size_256K[log_bpp][2]);
2842
2843 float max_3d_overalloc_256K = 1.1;
2844 float max_3d_overalloc_64K = 1.2;
2845 float max_3d_overalloc_4K = 2;
2846
2847 if (get_out.validModes.sw3d256kB &&
2848 (size_256K / (double)ideal_size <= max_3d_overalloc_256K || !get_out.validModes.sw3d64kB))
2849 return ADDR3_256KB_3D;
2850
2851 if (get_out.validModes.sw3d64kB &&
2852 (size_64K / (double)ideal_size <= max_3d_overalloc_64K || !get_out.validModes.sw3d4kB))
2853 return ADDR3_64KB_3D;
2854
2855 if (get_out.validModes.sw3d4kB &&
2856 (size_4K / (double)ideal_size <= max_3d_overalloc_4K ||
2857 /* If the image is thick, prefer thick tiling. */
2858 in->numSlices >= block3d_size_4K[log_bpp][2] * 3))
2859 return ADDR3_4KB_3D;
2860
2861 /* Try to select a 2D (planar) swizzle mode to save memory. */
2862 }
2863
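 /* The tables below hold swizzle block dimensions in pixels as {width, height},
  * indexed by [log2(samples)][log2(bpe)]; the LINEAR table holds the width
  * alignment in elements used for the size estimate.
  */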
2864 static unsigned block_size_LINEAR[5] = {
2865 /* 1xAA (MSAA not supported with LINEAR)
2866 *
2867 * The pitch alignment is 128B, but the slice size is computed as if the pitch alignment
2868 * was 256B.
2869 */
2870 256,
2871 128,
2872 64,
2873 32,
2874 16,
2875 };
2876 static unsigned block_size_256B[4][5][2] = {
2877 { /* 1xAA */
2878 {16, 16},
2879 {16, 8},
2880 {8, 8},
2881 {8, 4},
2882 {4, 4},
2883 },
2884 { /* 2xAA */
2885 {16, 8},
2886 {8, 8},
2887 {8, 4},
2888 {4, 4},
2889 {4, 2},
2890 },
2891 { /* 4xAA */
2892 {8, 8},
2893 {8, 4},
2894 {4, 4},
2895 {4, 2},
2896 {2, 2},
2897 },
2898 { /* 8xAA */
2899 {8, 4},
2900 {4, 4},
2901 {4, 2},
2902 {2, 2},
2903 {2, 1},
2904 },
2905 };
2906 static unsigned block_size_4K[4][5][2] = {
2907 { /* 1xAA */
2908 {64, 64},
2909 {64, 32},
2910 {32, 32},
2911 {32, 16},
2912 {16, 16},
2913 },
2914 { /* 2xAA */
2915 {64, 32},
2916 {32, 32},
2917 {32, 16},
2918 {16, 16},
2919 {16, 8},
2920 },
2921 { /* 4xAA */
2922 {32, 32},
2923 {32, 16},
2924 {16, 16},
2925 {16, 8},
2926 {8, 8},
2927 },
2928 { /* 8xAA */
2929 {32, 16},
2930 {16, 16},
2931 {16, 8},
2932 {8, 8},
2933 {8, 4},
2934 },
2935 };
2936 static unsigned block_size_64K[4][5][2] = {
2937 { /* 1xAA */
2938 {256, 256},
2939 {256, 128},
2940 {128, 128},
2941 {128, 64},
2942 {64, 64},
2943 },
2944 { /* 2xAA */
2945 {256, 128},
2946 {128, 128},
2947 {128, 64},
2948 {64, 64},
2949 {64, 32},
2950 },
2951 { /* 4xAA */
2952 {128, 128},
2953 {128, 64},
2954 {64, 64},
2955 {64, 32},
2956 {32, 32},
2957 },
2958 { /* 8xAA */
2959 {128, 64},
2960 {64, 64},
2961 {64, 32},
2962 {32, 32},
2963 {32, 16},
2964 },
2965 };
2966 static unsigned block_size_256K[4][5][2] = {
2967 { /* 1xAA */
2968 {512, 512},
2969 {512, 256},
2970 {256, 256},
2971 {256, 128},
2972 {128, 128},
2973 },
2974 { /* 2xAA */
2975 {512, 256},
2976 {256, 256},
2977 {256, 128},
2978 {128, 128},
2979 {128, 64},
2980 },
2981 { /* 4xAA */
2982 {256, 256},
2983 {256, 128},
2984 {128, 128},
2985 {128, 64},
2986 {64, 64},
2987 },
2988 { /* 8xAA */
2989 {256, 128},
2990 {128, 128},
2991 {128, 64},
2992 {64, 64},
2993 {64, 32},
2994 },
2995 };
2996
2997 uint64_t size_LINEAR = gfx12_estimate_size(in, surf, block_size_LINEAR[log_bpp], 1, 1);
2998
2999 uint64_t size_256B = gfx12_estimate_size(in, surf, block_size_256B[log_samples][log_bpp][0],
3000 block_size_256B[log_samples][log_bpp][1], 1);
3001
3002 uint64_t size_4K = gfx12_estimate_size(in, surf, block_size_4K[log_samples][log_bpp][0],
3003 block_size_4K[log_samples][log_bpp][1], 1);
3004
3005 uint64_t size_64K = gfx12_estimate_size(in, surf, block_size_64K[log_samples][log_bpp][0],
3006 block_size_64K[log_samples][log_bpp][1], 1);
3007
3008 uint64_t size_256K = gfx12_estimate_size(in, surf, block_size_256K[log_samples][log_bpp][0],
3009 block_size_256K[log_samples][log_bpp][1], 1);
3010
3011 float max_2d_overalloc_256K = 1.1; /* relative to ideal */
3012 float max_2d_overalloc_64K = 1.3; /* relative to ideal */
3013 float max_2d_overalloc_4K = 2; /* relative to ideal */
3014 float max_2d_overalloc_256B = 3; /* relative to LINEAR */
3015
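/* Pick the largest valid 2D block size whose overallocation relative to ideal_size stays within
 * its threshold above; a larger block is also used when no smaller swizzle mode is valid.
 */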
3016 if (get_out.validModes.sw2d256kB &&
3017 (size_256K / (double)ideal_size <= max_2d_overalloc_256K || !get_out.validModes.sw2d64kB))
3018 return ADDR3_256KB_2D;
3019
3020 if (get_out.validModes.sw2d64kB &&
3021 (size_64K / (double)ideal_size <= max_2d_overalloc_64K || !get_out.validModes.sw2d4kB))
3022 return ADDR3_64KB_2D;
3023
3024 if (get_out.validModes.sw2d4kB &&
3025 (size_4K / (double)ideal_size <= max_2d_overalloc_4K ||
3026 (!get_out.validModes.sw2d256B && !get_out.validModes.swLinear)))
3027 return ADDR3_4KB_2D;
3028
3029 assert(get_out.validModes.sw2d256B || get_out.validModes.swLinear);
3030
3031 if (get_out.validModes.sw2d256B && get_out.validModes.swLinear)
3032 return size_256B / (double)size_LINEAR <= max_2d_overalloc_256B ? ADDR3_256B_2D : ADDR3_LINEAR;
3033 else if (get_out.validModes.sw2d256B)
3034 return ADDR3_256B_2D;
3035 else
3036 return ADDR3_LINEAR;
3037 }
3038
3039 static bool gfx12_compute_hiz_his_info(struct ac_addrlib *addrlib, const struct radeon_info *info,
3040 struct radeon_surf *surf, struct gfx12_hiz_his_layout *hizs,
3041 const ADDR3_COMPUTE_SURFACE_INFO_INPUT *surf_in)
3042 {
3043 assert(surf_in->flags.depth != surf_in->flags.stencil);
3044
3045 if (surf->flags & RADEON_SURF_NO_HTILE || (info->gfx_level == GFX12 && info->chip_rev == 0))
3046 return true;
3047
3048 ADDR3_COMPUTE_SURFACE_INFO_OUTPUT out = {0};
3049 out.size = sizeof(ADDR3_COMPUTE_SURFACE_INFO_OUTPUT);
3050
3051 ADDR3_COMPUTE_SURFACE_INFO_INPUT in = *surf_in;
3052 in.flags.depth = 0;
3053 in.flags.stencil = 0;
3054 in.flags.hiZHiS = 1;
3055
3056 if (surf_in->flags.depth) {
3057 in.format = ADDR_FMT_32;
3058 in.bpp = 32;
3059 } else {
3060 in.format = ADDR_FMT_16;
3061 in.bpp = 16;
3062 }
3063
3064 /* Compute the HiZ/HiS size. */
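/* Each HiZ/HiS element covers an 8x8 pixel tile; pad the tile counts to a multiple of 2. */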
3065 in.width = align(DIV_ROUND_UP(surf_in->width, 8), 2);
3066 in.height = align(DIV_ROUND_UP(surf_in->height, 8), 2);
3067 in.swizzleMode = gfx12_select_swizzle_mode(addrlib, info, NULL, &in);
3068
3069 int ret = Addr3ComputeSurfaceInfo(addrlib->handle, &in, &out);
3070 if (ret != ADDR_OK)
3071 return false;
3072
3073 hizs->size = out.surfSize;
3074 hizs->width_in_tiles = in.width;
3075 hizs->height_in_tiles = in.height;
3076 hizs->swizzle_mode = in.swizzleMode;
3077 hizs->alignment_log2 = util_logbase2(out.baseAlign);
3078 return true;
3079 }
3080
3081 static bool gfx12_compute_miptree(struct ac_addrlib *addrlib, const struct radeon_info *info,
3082 const struct ac_surf_config *config, struct radeon_surf *surf,
3083 bool compressed, ADDR3_COMPUTE_SURFACE_INFO_INPUT *in)
3084 {
3085 ADDR3_MIP_INFO mip_info[RADEON_SURF_MAX_LEVELS] = {0};
3086 ADDR3_COMPUTE_SURFACE_INFO_OUTPUT out = {0};
3087 ADDR_E_RETURNCODE ret;
3088
3089 out.size = sizeof(ADDR3_COMPUTE_SURFACE_INFO_OUTPUT);
3090 out.pMipInfo = mip_info;
3091
3092 ret = Addr3ComputeSurfaceInfo(addrlib->handle, in, &out);
3093 if (ret != ADDR_OK)
3094 return false;
3095
3096 /* TODO: remove this block once addrlib stops giving us 64K pitch for small images, breaking
3097 * modifiers and X.Org.
3098 */
3099 if (in->swizzleMode >= ADDR3_256B_2D && in->swizzleMode <= ADDR3_256KB_2D &&
3100 in->numMipLevels == 1) {
3101 static unsigned block_bits[ADDR3_MAX_TYPE] = {
3102 [ADDR3_256B_2D] = 8,
3103 [ADDR3_4KB_2D] = 12,
3104 [ADDR3_64KB_2D] = 16,
3105 [ADDR3_256KB_2D] = 18,
3106 };
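/* A 2^block_bits-byte block holds 2^align_bits elements, so its width (and thus the minimum
 * pitch alignment) is 2^ceil(align_bits / 2) elements.
 */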
3107 unsigned align_bits = block_bits[in->swizzleMode] - util_logbase2(surf->bpe);
3108 unsigned w_align = 1 << (align_bits / 2 + align_bits % 2);
3109
3110 out.pitch = align(in->width, w_align);
3111 }
3112
3113 if (in->flags.stencil) {
3114 surf->u.gfx9.zs.stencil_swizzle_mode = in->swizzleMode;
3115 surf->u.gfx9.zs.stencil_offset = align(surf->surf_size, out.baseAlign);
3116 surf->surf_alignment_log2 = MAX2(surf->surf_alignment_log2, util_logbase2(out.baseAlign));
3117 surf->surf_size = surf->u.gfx9.zs.stencil_offset + out.surfSize;
3118
3119 if (info->chip_rev >= 2 &&
3120 !gfx12_compute_hiz_his_info(addrlib, info, surf, &surf->u.gfx9.zs.his, in))
3121 return false;
3122
3123 return true;
3124 }
3125
3126 surf->u.gfx9.surf_slice_size = out.sliceSize;
3127 surf->u.gfx9.surf_pitch = out.pitch;
3128 surf->u.gfx9.surf_height = out.height;
3129 surf->surf_size = out.surfSize;
3130 surf->surf_alignment_log2 = util_logbase2(out.baseAlign);
3131
3132 if (surf->flags & RADEON_SURF_PRT) {
3133 surf->prt_tile_width = out.blockExtent.width;
3134 surf->prt_tile_height = out.blockExtent.height;
3135 surf->prt_tile_depth = out.blockExtent.depth;
3136 surf->first_mip_tail_level = out.firstMipIdInTail;
3137
3138 for (unsigned i = 0; i < in->numMipLevels; i++) {
3139 surf->u.gfx9.prt_level_offset[i] = mip_info[i].macroBlockOffset + mip_info[i].mipTailOffset;
3140 surf->u.gfx9.prt_level_pitch[i] = mip_info[i].pitch;
3141 }
3142 }
3143
3144 if (surf->blk_w == 2 && out.pitch == out.pixelPitch &&
3145 surf->u.gfx9.swizzle_mode == ADDR3_LINEAR) {
3146 const unsigned linear_byte_alignment = 128;
3147
3148 /* Adjust surf_pitch to be in elements units not in pixels */
3149 surf->u.gfx9.surf_pitch = align(surf->u.gfx9.surf_pitch / surf->blk_w,
3150 linear_byte_alignment / surf->bpe);
3151 /* The surface is really a surf->bpe bytes per pixel surface even if we
3152 * use it as a surf->bpe bytes per element one.
3153 * Adjust surf_slice_size and surf_size to reflect the change
3154 * made to surf_pitch.
3155 */
3156 surf->u.gfx9.surf_slice_size =
3157 MAX2(surf->u.gfx9.surf_slice_size,
3158 (uint64_t)surf->u.gfx9.surf_pitch * out.height * surf->bpe * surf->blk_w);
3159 surf->surf_size = surf->u.gfx9.surf_slice_size * in->numSlices;
3160
3161 int alignment = linear_byte_alignment / surf->bpe;
3162 for (unsigned i = 0; i < in->numMipLevels; i++) {
3163 surf->u.gfx9.offset[i] = mip_info[i].offset;
3164 /* Adjust pitch like we did for surf_pitch */
3165 surf->u.gfx9.pitch[i] = align(mip_info[i].pitch / surf->blk_w, alignment);
3166 }
3167 surf->u.gfx9.base_mip_width = surf->u.gfx9.surf_pitch;
3168 } else if (in->swizzleMode == ADDR3_LINEAR) {
3169 for (unsigned i = 0; i < in->numMipLevels; i++) {
3170 surf->u.gfx9.offset[i] = mip_info[i].offset;
3171 surf->u.gfx9.pitch[i] = mip_info[i].pitch;
3172 }
3173 surf->u.gfx9.base_mip_width = surf->u.gfx9.surf_pitch;
3174 } else {
3175 surf->u.gfx9.base_mip_width = mip_info[0].pitch;
3176 }
3177
3178 surf->u.gfx9.base_mip_height = mip_info[0].height;
3179
3180 if (in->flags.depth) {
3181 assert(in->swizzleMode != ADDR3_LINEAR);
3182
3183 return gfx12_compute_hiz_his_info(addrlib, info, surf, &surf->u.gfx9.zs.hiz, in);
3184 }
3185
3186 /* Compute tile swizzle for the color surface. All swizzle modes >= 4K support it. */
3187 if (surf->modifier == DRM_FORMAT_MOD_INVALID && config->info.surf_index &&
3188 in->swizzleMode >= ADDR3_4KB_2D && !out.mipChainInTail &&
3189 !(surf->flags & RADEON_SURF_SHAREABLE) && !get_display_flag(config, surf)) {
3190 ADDR3_COMPUTE_PIPEBANKXOR_INPUT xin = {0};
3191 ADDR3_COMPUTE_PIPEBANKXOR_OUTPUT xout = {0};
3192
3193 xin.size = sizeof(ADDR3_COMPUTE_PIPEBANKXOR_INPUT);
3194 xout.size = sizeof(ADDR3_COMPUTE_PIPEBANKXOR_OUTPUT);
3195
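/* Use a running per-context surface index so successive allocations get different
 * pipe/bank XOR values.
 */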
3196 xin.surfIndex = p_atomic_inc_return(config->info.surf_index) - 1;
3197 xin.swizzleMode = in->swizzleMode;
3198
3199 ret = Addr3ComputePipeBankXor(addrlib->handle, &xin, &xout);
3200 if (ret != ADDR_OK)
3201 return false;
3202
3203 assert(xout.pipeBankXor <= u_bit_consecutive(0, sizeof(surf->tile_swizzle) * 8 + 2));
3204 surf->tile_swizzle = xout.pipeBankXor;
3205 }
3206
3207 return true;
3208 }
3209
3210 static bool gfx12_compute_surface(struct ac_addrlib *addrlib, const struct radeon_info *info,
3211 const struct ac_surf_config *config, enum radeon_surf_mode mode,
3212 struct radeon_surf *surf)
3213 {
3214 bool compressed = surf->blk_w == 4 && surf->blk_h == 4;
3215 bool stencil_only = (surf->flags & RADEON_SURF_SBUFFER) && !(surf->flags & RADEON_SURF_ZBUFFER);
3216 ADDR3_COMPUTE_SURFACE_INFO_INPUT AddrSurfInfoIn = {0};
3217
3218 AddrSurfInfoIn.size = sizeof(ADDR3_COMPUTE_SURFACE_INFO_INPUT);
3219
3220 if (stencil_only) {
3221 AddrSurfInfoIn.bpp = 8;
3222 AddrSurfInfoIn.format = ADDR_FMT_8;
3223 } else {
3224 AddrSurfInfoIn.format = bpe_to_format(surf);
3225 if (!compressed)
3226 AddrSurfInfoIn.bpp = surf->bpe * 8;
3227 }
3228
3229 AddrSurfInfoIn.flags.depth = !!(surf->flags & RADEON_SURF_ZBUFFER);
3230 AddrSurfInfoIn.flags.stencil = stencil_only;
3231 AddrSurfInfoIn.flags.blockCompressed = compressed;
3232 AddrSurfInfoIn.flags.isVrsImage = !!(surf->flags & RADEON_SURF_VRS_RATE);
3233 AddrSurfInfoIn.flags.standardPrt = !!(surf->flags & RADEON_SURF_PRT);
3234
3235 if (config->is_3d)
3236 AddrSurfInfoIn.resourceType = ADDR_RSRC_TEX_3D;
3237 else if (config->is_1d)
3238 AddrSurfInfoIn.resourceType = ADDR_RSRC_TEX_1D;
3239 else
3240 AddrSurfInfoIn.resourceType = ADDR_RSRC_TEX_2D;
3241
3242 AddrSurfInfoIn.width = config->info.width;
3243 AddrSurfInfoIn.height = config->info.height;
3244 AddrSurfInfoIn.numMipLevels = config->info.levels;
3245 AddrSurfInfoIn.numSamples = MAX2(1, config->info.samples);
3246
3247 if (config->is_3d)
3248 AddrSurfInfoIn.numSlices = config->info.depth;
3249 else if (config->is_cube)
3250 AddrSurfInfoIn.numSlices = 6;
3251 else
3252 AddrSurfInfoIn.numSlices = config->info.array_size;
3253
3254 /* Select the swizzle mode. */
3255 if (surf->modifier != DRM_FORMAT_MOD_INVALID) {
3256 assert(!compressed);
3257 assert(!ac_modifier_has_dcc(surf->modifier) || !(surf->flags & RADEON_SURF_DISABLE_DCC));
3258 AddrSurfInfoIn.swizzleMode = ac_get_modifier_swizzle_mode(info->gfx_level, surf->modifier);
3259 } else if (surf->flags & RADEON_SURF_IMPORTED) {
3260 AddrSurfInfoIn.swizzleMode = surf->u.gfx9.swizzle_mode;
3261 } else if (mode == RADEON_SURF_MODE_LINEAR_ALIGNED) {
3262 assert(config->info.samples <= 1 && !(surf->flags & RADEON_SURF_Z_OR_SBUFFER));
3263 AddrSurfInfoIn.swizzleMode = ADDR3_LINEAR;
3264 } else if (config->is_1d && !(surf->flags & RADEON_SURF_Z_OR_SBUFFER)) {
3265 AddrSurfInfoIn.swizzleMode = ADDR3_LINEAR;
3266 } else if (surf->flags & RADEON_SURF_VIDEO_REFERENCE) {
3267 AddrSurfInfoIn.swizzleMode = ADDR3_256B_2D;
3268 } else {
3269 AddrSurfInfoIn.swizzleMode = gfx12_select_swizzle_mode(addrlib, info, surf, &AddrSurfInfoIn);
3270 }
3271
3272 /* Force the linear pitch from 128B (default) to 256B for multi-GPU interop. This only applies
3273 * to 2D non-MSAA and plain color formats.
3274 */
3275 if (!config->is_1d && !config->is_3d && !config->is_cube && !config->is_array &&
3276 config->info.levels == 1 && config->info.samples <= 1 &&
3277 surf->blk_w == 1 && surf->blk_h == 1 && !(surf->flags & RADEON_SURF_Z_OR_SBUFFER) &&
3278 util_is_power_of_two_nonzero(surf->bpe) && AddrSurfInfoIn.swizzleMode == ADDR3_LINEAR) {
3279 AddrSurfInfoIn.pitchInElement = align(config->info.width, LINEAR_PITCH_ALIGNMENT / surf->bpe);
3280 surf->u.gfx9.uses_custom_pitch = true;
3281 }
3282
3283 bool supports_display_dcc = info->drm_minor >= 58;
3284 surf->u.gfx9.swizzle_mode = AddrSurfInfoIn.swizzleMode;
3285 surf->u.gfx9.resource_type = (enum gfx9_resource_type)AddrSurfInfoIn.resourceType;
3286 surf->u.gfx9.gfx12_enable_dcc = ac_modifier_has_dcc(surf->modifier) ||
3287 (surf->modifier == DRM_FORMAT_MOD_INVALID &&
3288 !(surf->flags & RADEON_SURF_DISABLE_DCC) &&
3289 /* Always enable compression for Z/S and MSAA color by default. */
3290 (surf->flags & RADEON_SURF_Z_OR_SBUFFER ||
3291 config->info.samples > 1 ||
3292 ((supports_display_dcc || !(surf->flags & RADEON_SURF_SCANOUT)) &&
3293 /* This one is not strictly necessary. */
3294 surf->u.gfx9.swizzle_mode != ADDR3_LINEAR)));
3295
3296 surf->has_stencil = !!(surf->flags & RADEON_SURF_SBUFFER);
3297 surf->is_linear = surf->u.gfx9.swizzle_mode == ADDR3_LINEAR;
3298 surf->is_displayable = !(surf->flags & RADEON_SURF_Z_OR_SBUFFER) &&
3299 surf->u.gfx9.resource_type != RADEON_RESOURCE_3D &&
3300 (supports_display_dcc || !surf->u.gfx9.gfx12_enable_dcc);
3301 surf->thick_tiling = surf->u.gfx9.swizzle_mode >= ADDR3_4KB_3D;
3302
3303 if (surf->flags & RADEON_SURF_Z_OR_SBUFFER) {
3304 surf->u.gfx9.zs.hiz.offset = 0;
3305 surf->u.gfx9.zs.hiz.size = 0;
3306 surf->u.gfx9.zs.his.offset = 0;
3307 surf->u.gfx9.zs.his.size = 0;
3308 }
3309
3310 if (surf->u.gfx9.gfx12_enable_dcc) {
3311 if (surf->modifier != DRM_FORMAT_MOD_INVALID) {
3312 surf->u.gfx9.color.dcc.max_compressed_block_size =
3313 AMD_FMT_MOD_GET(DCC_MAX_COMPRESSED_BLOCK, surf->modifier);
3314 } else if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER) &&
3315 /* Don't change the DCC settings for imported buffers - they might differ. */
3316 !(surf->flags & RADEON_SURF_IMPORTED)) {
3317 surf->u.gfx9.color.dcc.max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_256B;
3318 }
3319 }
3320
3321 /* Calculate texture layout information. */
3322 if (!stencil_only &&
3323 !gfx12_compute_miptree(addrlib, info, config, surf, compressed, &AddrSurfInfoIn))
3324 return false;
3325
3326 /* Calculate texture layout information for stencil. */
3327 if (surf->flags & RADEON_SURF_SBUFFER) {
3328 if (stencil_only) {
3329 assert(!AddrSurfInfoIn.flags.depth);
3330 assert(AddrSurfInfoIn.flags.stencil);
3331 assert(AddrSurfInfoIn.bpp == 8);
3332 assert(AddrSurfInfoIn.format == ADDR_FMT_8);
3333 } else {
3334 AddrSurfInfoIn.flags.depth = 0;
3335 AddrSurfInfoIn.flags.stencil = 1;
3336 AddrSurfInfoIn.bpp = 8;
3337 AddrSurfInfoIn.format = ADDR_FMT_8;
3338 }
3339
3340 if (!gfx12_compute_miptree(addrlib, info, config, surf, compressed, &AddrSurfInfoIn))
3341 return false;
3342 }
3343
3344 return true;
3345 }
3346
3347 int ac_compute_surface(struct ac_addrlib *addrlib, const struct radeon_info *info,
3348 const struct ac_surf_config *config, enum radeon_surf_mode mode,
3349 struct radeon_surf *surf)
3350 {
3351 int r;
3352
3353 r = surf_config_sanity(config, surf->flags);
3354 if (r)
3355 return r;
3356
3357 /* Images are emulated on some CDNA chips. */
3358 if (!info->has_image_opcodes && !(surf->flags & RADEON_SURF_VIDEO_REFERENCE))
3359 mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
3360
3361 /* 0 offsets mean disabled. */
3362 surf->meta_offset = surf->fmask_offset = surf->cmask_offset = surf->display_dcc_offset = 0;
3363
3364 if (info->family_id >= FAMILY_GFX12) {
3365 if (!gfx12_compute_surface(addrlib, info, config, mode, surf))
3366 return ADDR_ERROR;
3367
3368 /* Determine the memory layout of multiple allocations in one buffer. */
3369 surf->total_size = surf->surf_size;
3370 surf->alignment_log2 = surf->surf_alignment_log2;
3371
3372 if (surf->flags & RADEON_SURF_Z_OR_SBUFFER) {
3373 if (surf->u.gfx9.zs.hiz.size) {
3374 surf->u.gfx9.zs.hiz.offset = align64(surf->total_size,
3375 1ull << surf->u.gfx9.zs.hiz.alignment_log2);
3376 surf->surf_alignment_log2 = MAX2(surf->surf_alignment_log2,
3377 surf->u.gfx9.zs.hiz.alignment_log2);
3378 surf->total_size = surf->u.gfx9.zs.hiz.offset + surf->u.gfx9.zs.hiz.size;
3379 }
3380
3381 if (surf->u.gfx9.zs.his.size) {
3382 surf->u.gfx9.zs.his.offset = align64(surf->total_size,
3383 1ull << surf->u.gfx9.zs.his.alignment_log2);
3384 surf->surf_alignment_log2 = MAX2(surf->surf_alignment_log2,
3385 surf->u.gfx9.zs.his.alignment_log2);
3386 surf->total_size = surf->u.gfx9.zs.his.offset + surf->u.gfx9.zs.his.size;
3387 }
3388 }
3389
3390 return 0;
3391 }
3392
3393 /* Gfx6-11. */
3394 if (info->family_id >= FAMILY_AI)
3395 r = gfx9_compute_surface(addrlib, info, config, mode, surf);
3396 else
3397 r = gfx6_compute_surface(addrlib->handle, info, config, mode, surf);
3398
3399 if (r)
3400 return r;
3401
3402 /* Determine the memory layout of multiple allocations in one buffer. */
3403 surf->total_size = surf->surf_size;
3404 surf->alignment_log2 = surf->surf_alignment_log2;
3405
3406 if (surf->fmask_size) {
3407 assert(config->info.samples >= 2);
3408 surf->fmask_offset = align64(surf->total_size, 1ull << surf->fmask_alignment_log2);
3409 surf->total_size = surf->fmask_offset + surf->fmask_size;
3410 surf->alignment_log2 = MAX2(surf->alignment_log2, surf->fmask_alignment_log2);
3411 }
3412
3413 /* Single-sample CMASK is in a separate buffer. */
3414 if (surf->cmask_size && config->info.samples >= 2) {
3415 surf->cmask_offset = align64(surf->total_size, 1ull << surf->cmask_alignment_log2);
3416 surf->total_size = surf->cmask_offset + surf->cmask_size;
3417 surf->alignment_log2 = MAX2(surf->alignment_log2, surf->cmask_alignment_log2);
3418 }
3419
3420 if (surf->is_displayable)
3421 surf->flags |= RADEON_SURF_SCANOUT;
3422
3423 if (surf->meta_size &&
3424 /* dcc_size is computed on GFX9+ only if it's displayable. */
3425 (info->gfx_level >= GFX9 || !get_display_flag(config, surf))) {
3426 /* It's better when displayable DCC is immediately after
3427 * the image due to hw-specific reasons.
3428 */
3429 if (info->gfx_level >= GFX9 &&
3430 !(surf->flags & RADEON_SURF_Z_OR_SBUFFER) &&
3431 surf->u.gfx9.color.dcc.display_equation_valid) {
3432 /* Add space for the displayable DCC buffer. */
3433 surf->display_dcc_offset = align64(surf->total_size, 1ull << surf->u.gfx9.color.display_dcc_alignment_log2);
3434 surf->total_size = surf->display_dcc_offset + surf->u.gfx9.color.display_dcc_size;
3435 }
3436
3437 surf->meta_offset = align64(surf->total_size, 1ull << surf->meta_alignment_log2);
3438 surf->total_size = surf->meta_offset + surf->meta_size;
3439 surf->alignment_log2 = MAX2(surf->alignment_log2, surf->meta_alignment_log2);
3440 }
3441
3442 return 0;
3443 }
3444
3445 /* This is meant to be used for disabling DCC. */
3446 void ac_surface_zero_dcc_fields(struct radeon_surf *surf)
3447 {
3448 if (surf->flags & RADEON_SURF_Z_OR_SBUFFER)
3449 return;
3450
3451 surf->meta_offset = 0;
3452 surf->display_dcc_offset = 0;
3453 if (!surf->fmask_offset && !surf->cmask_offset) {
3454 surf->total_size = surf->surf_size;
3455 surf->alignment_log2 = surf->surf_alignment_log2;
3456 }
3457 }
3458
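/* Decode the TILE_SPLIT tiling field (0..6) into bytes (64 << value); unknown values fall back
 * to 1024. eg_tile_split_rev() below is the inverse mapping.
 */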
3459 static unsigned eg_tile_split(unsigned tile_split)
3460 {
3461 switch (tile_split) {
3462 case 0:
3463 tile_split = 64;
3464 break;
3465 case 1:
3466 tile_split = 128;
3467 break;
3468 case 2:
3469 tile_split = 256;
3470 break;
3471 case 3:
3472 tile_split = 512;
3473 break;
3474 default:
3475 case 4:
3476 tile_split = 1024;
3477 break;
3478 case 5:
3479 tile_split = 2048;
3480 break;
3481 case 6:
3482 tile_split = 4096;
3483 break;
3484 }
3485 return tile_split;
3486 }
3487
3488 static unsigned eg_tile_split_rev(unsigned eg_tile_split)
3489 {
3490 switch (eg_tile_split) {
3491 case 64:
3492 return 0;
3493 case 128:
3494 return 1;
3495 case 256:
3496 return 2;
3497 case 512:
3498 return 3;
3499 default:
3500 case 1024:
3501 return 4;
3502 case 2048:
3503 return 5;
3504 case 4096:
3505 return 6;
3506 }
3507 }
3508
3509 #define AMDGPU_TILING_DCC_MAX_COMPRESSED_BLOCK_SIZE_SHIFT 45
3510 #define AMDGPU_TILING_DCC_MAX_COMPRESSED_BLOCK_SIZE_MASK 0x3
3511
3512 /* This should be called before ac_compute_surface. */
3513 void ac_surface_apply_bo_metadata(enum amd_gfx_level gfx_level, struct radeon_surf *surf,
3514 uint64_t tiling_flags, enum radeon_surf_mode *mode)
3515 {
3516 bool scanout;
3517
3518 if (gfx_level >= GFX12) {
3519 surf->u.gfx9.swizzle_mode = AMDGPU_TILING_GET(tiling_flags, GFX12_SWIZZLE_MODE);
3520 surf->u.gfx9.color.dcc.max_compressed_block_size =
3521 AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_MAX_COMPRESSED_BLOCK);
3522 surf->u.gfx9.color.dcc_data_format =
3523 AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_DATA_FORMAT);
3524 surf->u.gfx9.color.dcc_number_type =
3525 AMDGPU_TILING_GET(tiling_flags, GFX12_DCC_NUMBER_TYPE);
3526 scanout = AMDGPU_TILING_GET(tiling_flags, GFX12_SCANOUT);
3527 } else if (gfx_level >= GFX9) {
3528 surf->u.gfx9.swizzle_mode = AMDGPU_TILING_GET(tiling_flags, SWIZZLE_MODE);
3529 surf->u.gfx9.color.dcc.independent_64B_blocks =
3530 AMDGPU_TILING_GET(tiling_flags, DCC_INDEPENDENT_64B);
3531 surf->u.gfx9.color.dcc.independent_128B_blocks =
3532 AMDGPU_TILING_GET(tiling_flags, DCC_INDEPENDENT_128B);
3533 surf->u.gfx9.color.dcc.max_compressed_block_size =
3534 AMDGPU_TILING_GET(tiling_flags, DCC_MAX_COMPRESSED_BLOCK_SIZE);
3535 surf->u.gfx9.color.display_dcc_pitch_max = AMDGPU_TILING_GET(tiling_flags, DCC_PITCH_MAX);
3536 scanout = AMDGPU_TILING_GET(tiling_flags, SCANOUT);
3537 *mode =
3538 surf->u.gfx9.swizzle_mode > 0 ? RADEON_SURF_MODE_2D : RADEON_SURF_MODE_LINEAR_ALIGNED;
3539 } else {
3540 surf->u.legacy.pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
3541 surf->u.legacy.bankw = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
3542 surf->u.legacy.bankh = 1 << AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
3543 surf->u.legacy.tile_split = eg_tile_split(AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT));
3544 surf->u.legacy.mtilea = 1 << AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
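/* NUM_BANKS stores log2(num_banks) - 1: 0 -> 2 banks, 1 -> 4, 2 -> 8, 3 -> 16. */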
3545 surf->u.legacy.num_banks = 2 << AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
3546 scanout = AMDGPU_TILING_GET(tiling_flags, MICRO_TILE_MODE) == 0; /* DISPLAY */
3547
3548 if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 4) /* 2D_TILED_THIN1 */
3549 *mode = RADEON_SURF_MODE_2D;
3550 else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == 2) /* 1D_TILED_THIN1 */
3551 *mode = RADEON_SURF_MODE_1D;
3552 else
3553 *mode = RADEON_SURF_MODE_LINEAR_ALIGNED;
3554 }
3555
3556 if (scanout)
3557 surf->flags |= RADEON_SURF_SCANOUT;
3558 else
3559 surf->flags &= ~RADEON_SURF_SCANOUT;
3560 }
3561
3562 void ac_surface_compute_bo_metadata(const struct radeon_info *info, struct radeon_surf *surf,
3563 uint64_t *tiling_flags)
3564 {
3565 *tiling_flags = 0;
3566
3567 if (info->gfx_level >= GFX12) {
3568 *tiling_flags |= AMDGPU_TILING_SET(GFX12_SWIZZLE_MODE, surf->u.gfx9.swizzle_mode);
3569 *tiling_flags |= AMDGPU_TILING_SET(GFX12_DCC_MAX_COMPRESSED_BLOCK,
3570 surf->u.gfx9.color.dcc.max_compressed_block_size);
3571 *tiling_flags |= AMDGPU_TILING_SET(GFX12_DCC_NUMBER_TYPE, surf->u.gfx9.color.dcc_number_type);
3572 *tiling_flags |= AMDGPU_TILING_SET(GFX12_DCC_DATA_FORMAT, surf->u.gfx9.color.dcc_data_format);
3573 *tiling_flags |= AMDGPU_TILING_SET(GFX12_SCANOUT, (surf->flags & RADEON_SURF_SCANOUT) != 0);
3574 } else if (info->gfx_level >= GFX9) {
3575 uint64_t dcc_offset = 0;
3576
3577 if (surf->meta_offset) {
3578 dcc_offset = surf->display_dcc_offset ? surf->display_dcc_offset : surf->meta_offset;
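/* DCC_OFFSET_256B is a 24-bit field in units of 256 bytes. */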
3579 assert((dcc_offset >> 8) != 0 && (dcc_offset >> 8) < (1 << 24));
3580 }
3581
3582 *tiling_flags |= AMDGPU_TILING_SET(SWIZZLE_MODE, surf->u.gfx9.swizzle_mode);
3583 *tiling_flags |= AMDGPU_TILING_SET(DCC_OFFSET_256B, dcc_offset >> 8);
3584 *tiling_flags |= AMDGPU_TILING_SET(DCC_PITCH_MAX, surf->u.gfx9.color.display_dcc_pitch_max);
3585 *tiling_flags |=
3586 AMDGPU_TILING_SET(DCC_INDEPENDENT_64B, surf->u.gfx9.color.dcc.independent_64B_blocks);
3587 *tiling_flags |=
3588 AMDGPU_TILING_SET(DCC_INDEPENDENT_128B, surf->u.gfx9.color.dcc.independent_128B_blocks);
3589 *tiling_flags |= AMDGPU_TILING_SET(DCC_MAX_COMPRESSED_BLOCK_SIZE,
3590 surf->u.gfx9.color.dcc.max_compressed_block_size);
3591 *tiling_flags |= AMDGPU_TILING_SET(SCANOUT, (surf->flags & RADEON_SURF_SCANOUT) != 0);
3592 } else {
3593 if (surf->u.legacy.level[0].mode >= RADEON_SURF_MODE_2D)
3594 *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 4); /* 2D_TILED_THIN1 */
3595 else if (surf->u.legacy.level[0].mode >= RADEON_SURF_MODE_1D)
3596 *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 2); /* 1D_TILED_THIN1 */
3597 else
3598 *tiling_flags |= AMDGPU_TILING_SET(ARRAY_MODE, 1); /* LINEAR_ALIGNED */
3599
3600 *tiling_flags |= AMDGPU_TILING_SET(PIPE_CONFIG, surf->u.legacy.pipe_config);
3601 *tiling_flags |= AMDGPU_TILING_SET(BANK_WIDTH, util_logbase2(surf->u.legacy.bankw));
3602 *tiling_flags |= AMDGPU_TILING_SET(BANK_HEIGHT, util_logbase2(surf->u.legacy.bankh));
3603 if (surf->u.legacy.tile_split)
3604 *tiling_flags |=
3605 AMDGPU_TILING_SET(TILE_SPLIT, eg_tile_split_rev(surf->u.legacy.tile_split));
3606 *tiling_flags |= AMDGPU_TILING_SET(MACRO_TILE_ASPECT, util_logbase2(surf->u.legacy.mtilea));
3607 *tiling_flags |= AMDGPU_TILING_SET(NUM_BANKS, util_logbase2(surf->u.legacy.num_banks) - 1);
3608
3609 if (surf->flags & RADEON_SURF_SCANOUT)
3610 *tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 0); /* DISPLAY_MICRO_TILING */
3611 else
3612 *tiling_flags |= AMDGPU_TILING_SET(MICRO_TILE_MODE, 1); /* THIN_MICRO_TILING */
3613 }
3614 }
3615
3616 static uint32_t ac_get_umd_metadata_word1(const struct radeon_info *info)
3617 {
3618 return (ATI_VENDOR_ID << 16) | info->pci_id;
3619 }
3620
3621 /* This should be called after ac_compute_surface. */
3622 bool ac_surface_apply_umd_metadata(const struct radeon_info *info, struct radeon_surf *surf,
3623 unsigned num_storage_samples, unsigned num_mipmap_levels,
3624 unsigned size_metadata, const uint32_t metadata[64])
3625 {
3626 const uint32_t *desc = &metadata[2];
3627 uint64_t offset;
3628
3629 if (surf->modifier != DRM_FORMAT_MOD_INVALID)
3630 return true;
3631
3632 if (info->gfx_level >= GFX9)
3633 offset = surf->u.gfx9.surf_offset;
3634 else
3635 offset = (uint64_t)surf->u.legacy.level[0].offset_256B * 256;
3636
3637 if (offset || /* Non-zero planes ignore metadata. */
3638 size_metadata < 10 * 4 || /* at least 2(header) + 8(desc) dwords */
3639 metadata[0] == 0 || /* invalid version number (1 and 2 layouts are compatible) */
3640 metadata[1] != ac_get_umd_metadata_word1(info)) /* invalid PCI ID */ {
3641 /* Disable DCC because it might not be enabled. */
3642 ac_surface_zero_dcc_fields(surf);
3643
3644 /* Don't report an error if the texture comes from an incompatible driver,
3645 * but this might not work.
3646 */
3647 return true;
3648 }
3649
3650 /* Validate that sample counts and the number of mipmap levels match. */
3651 unsigned desc_last_level = info->gfx_level >= GFX12 ? G_00A00C_LAST_LEVEL_GFX12(desc[3])
3652 : G_008F1C_LAST_LEVEL(desc[3]);
3653 unsigned type = G_008F1C_TYPE(desc[3]);
3654
3655 if (type == V_008F1C_SQ_RSRC_IMG_2D_MSAA || type == V_008F1C_SQ_RSRC_IMG_2D_MSAA_ARRAY) {
3656 unsigned log_samples = util_logbase2(MAX2(1, num_storage_samples));
3657
3658 if (desc_last_level != log_samples) {
3659 fprintf(stderr,
3660 "amdgpu: invalid MSAA texture import, "
3661 "metadata has log2(samples) = %u, the caller set %u\n",
3662 desc_last_level, log_samples);
3663 return false;
3664 }
3665 } else {
3666 if (desc_last_level != num_mipmap_levels - 1) {
3667 fprintf(stderr,
3668 "amdgpu: invalid mipmapped texture import, "
3669 "metadata has last_level = %u, the caller set %u\n",
3670 desc_last_level, num_mipmap_levels - 1);
3671 return false;
3672 }
3673 }
3674
3675 if (info->gfx_level >= GFX8 && info->gfx_level < GFX12 && G_008F28_COMPRESSION_EN(desc[6])) {
3676 /* Read DCC information. */
3677 switch (info->gfx_level) {
3678 case GFX8:
3679 surf->meta_offset = (uint64_t)desc[7] << 8;
3680 break;
3681
3682 case GFX9:
3683 surf->meta_offset =
3684 ((uint64_t)desc[7] << 8) | ((uint64_t)G_008F24_META_DATA_ADDRESS(desc[5]) << 40);
3685 surf->u.gfx9.color.dcc.pipe_aligned = G_008F24_META_PIPE_ALIGNED(desc[5]);
3686 surf->u.gfx9.color.dcc.rb_aligned = G_008F24_META_RB_ALIGNED(desc[5]);
3687
3688 /* If DCC is unaligned, this can only be a displayable image. */
3689 if (!surf->u.gfx9.color.dcc.pipe_aligned && !surf->u.gfx9.color.dcc.rb_aligned)
3690 assert(surf->is_displayable);
3691 break;
3692
3693 case GFX10:
3694 case GFX10_3:
3695 case GFX11:
3696 case GFX11_5:
3697 surf->meta_offset =
3698 ((uint64_t)G_00A018_META_DATA_ADDRESS_LO(desc[6]) << 8) | ((uint64_t)desc[7] << 16);
3699 surf->u.gfx9.color.dcc.pipe_aligned = G_00A018_META_PIPE_ALIGNED(desc[6]);
3700 break;
3701
3702 default:
3703 assert(0);
3704 return false;
3705 }
3706 } else {
3707 /* Disable DCC. dcc_offset is always set by texture_from_handle
3708 * and must be cleared here.
3709 */
3710 ac_surface_zero_dcc_fields(surf);
3711 }
3712
3713 return true;
3714 }
3715
3716 void ac_surface_compute_umd_metadata(const struct radeon_info *info, struct radeon_surf *surf,
3717 unsigned num_mipmap_levels, uint32_t desc[8],
3718 unsigned *size_metadata, uint32_t metadata[64],
3719 bool include_tool_md)
3720 {
3721 /* Clear the base address and set the relative DCC offset. */
3722 desc[0] = 0;
3723 desc[1] &= C_008F14_BASE_ADDRESS_HI;
3724
3725 switch (info->gfx_level) {
3726 case GFX6:
3727 case GFX7:
3728 break;
3729 case GFX8:
3730 desc[7] = surf->meta_offset >> 8;
3731 break;
3732 case GFX9:
3733 desc[7] = surf->meta_offset >> 8;
3734 desc[5] &= C_008F24_META_DATA_ADDRESS;
3735 desc[5] |= S_008F24_META_DATA_ADDRESS(surf->meta_offset >> 40);
3736 break;
3737 case GFX10:
3738 case GFX10_3:
3739 case GFX11:
3740 case GFX11_5:
3741 desc[6] &= C_00A018_META_DATA_ADDRESS_LO;
3742 desc[6] |= S_00A018_META_DATA_ADDRESS_LO(surf->meta_offset >> 8);
3743 desc[7] = surf->meta_offset >> 16;
3744 break;
3745 default: /* Gfx12 doesn't have any metadata address */
3746 break;
3747 }
3748
3749 /* Metadata image format, versions 1 and 2. Version 2 uses the same layout as
3750 * version 1 with some additional fields (used if include_tool_md=true).
3751 * [0] = optional flags | metadata_format_identifier
3752 * [1] = (VENDOR_ID << 16) | PCI_ID
3753 * [2:9] = image descriptor for the whole resource
3754 * [2] is always 0, because the base address is cleared
3755 * [9] is the DCC offset bits [39:8] from the beginning of
3756 * the buffer
3757 * gfx8-: [10:10+LAST_LEVEL] = mipmap level offset bits [39:8] for each level (gfx8-)
3758 * ---- Optional data (if version == 2, or version > 2 and
3759 * AC_SURF_METADATA_FLAG_EXTRA_MD_BIT is set)
3760 * It shouldn't be used by the driver as it's only present to help
3761 * tools (eg: umr) that would want to access this buffer.
3762 * gfx9+ if valid modifier: [10:11] = modifier
3763 * [12:12+3*nplane] = [offset, stride]
3764 * else: [10]: stride
3765 * ---- Optional data (if version >= 3 and AC_SURF_METADATA_FLAG_FAMILY_OVERRIDEN_BIT is set)
3766 * [last] = fake family id
3767 */
3768
3769 /* metadata image format version */
3770 metadata[0] = (include_tool_md || info->family_overridden) ? 3 : 1;
3771
3772 if (include_tool_md)
3773 metadata[0] |= 1u << (16 + AC_SURF_METADATA_FLAG_EXTRA_MD_BIT);
3774 if (info->family_overridden)
3775 metadata[0] |= 1u << (16 + AC_SURF_METADATA_FLAG_FAMILY_OVERRIDEN_BIT);
3776
3777 /* Tiling modes are ambiguous without a PCI ID. */
3778 metadata[1] = ac_get_umd_metadata_word1(info);
3779
3780 /* Dwords [2:9] contain the image descriptor. */
3781 memcpy(&metadata[2], desc, 8 * 4);
3782 *size_metadata = 10 * 4;
3783
3784 /* Dwords [10:..] contain the mipmap level offsets. */
3785 if (info->gfx_level <= GFX8) {
3786 for (unsigned i = 0; i < num_mipmap_levels; i++)
3787 metadata[10 + i] = surf->u.legacy.level[i].offset_256B;
3788
3789 *size_metadata += num_mipmap_levels * 4;
3790 } else if (include_tool_md) {
3791 if (surf->modifier != DRM_FORMAT_MOD_INVALID) {
3792 /* Modifier */
3793 metadata[10] = surf->modifier;
3794 metadata[11] = surf->modifier >> 32;
3795 /* Num planes */
3796 int nplanes = ac_surface_get_nplanes(surf);
3797 metadata[12] = nplanes;
3798 int ndw = 13;
3799 for (int i = 0; i < nplanes; i++) {
3800 metadata[ndw++] = ac_surface_get_plane_offset(info->gfx_level,
3801 surf, i, 0);
3802 metadata[ndw++] = ac_surface_get_plane_stride(info->gfx_level,
3803 surf, i, 0);
3804 }
3805 *size_metadata = ndw * 4;
3806 } else {
3807 metadata[10] = ac_surface_get_plane_stride(info->gfx_level,
3808 surf, 0, 0);
3809 *size_metadata = 11 * 4;
3810 }
3811 }
3812
3813 if (info->family_overridden) {
3814 int n_dw = *size_metadata / 4;
3815 assert(n_dw < 64 - 1);
3816 metadata[n_dw] = info->gfx_level;
3817 *size_metadata += 4;
3818 }
3819 }
3820
3821 static uint32_t ac_surface_get_pitch_align(const struct radeon_info *info,
3822 const struct radeon_surf *surf)
3823 {
3824 if (surf->is_linear) {
3825 if (info->gfx_level >= GFX12)
3826 return 128 / surf->bpe;
3827 else if (info->gfx_level >= GFX9)
3828 return 256 / surf->bpe;
3829 else
3830 return MAX2(8, 64 / surf->bpe);
3831 }
3832
3833 if (info->gfx_level >= GFX12) {
3834 if (surf->u.gfx9.resource_type == RADEON_RESOURCE_3D)
3835 return 1u << 31; /* reject 3D textures by returning an impossible alignment */
3836
3837 unsigned bpe_log2 = util_logbase2(surf->bpe);
3838 unsigned block_size_log2;
3839
3840 switch (surf->u.gfx9.swizzle_mode) {
3841 case ADDR3_256B_2D:
3842 block_size_log2 = 8;
3843 break;
3844 case ADDR3_4KB_2D:
3845 block_size_log2 = 12;
3846 break;
3847 case ADDR3_64KB_2D:
3848 block_size_log2 = 16;
3849 break;
3850 case ADDR3_256KB_2D:
3851 block_size_log2 = 18;
3852 break;
3853 default:
3854 unreachable("unhandled swizzle mode");
3855 }
3856
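/* Return the 1xAA block width in elements; this matches the block_size_* tables used for
 * swizzle mode selection.
 */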
3857 return 1 << ((block_size_log2 >> 1) - (bpe_log2 >> 1));
3858 } else if (info->gfx_level >= GFX9) {
3859 if (surf->u.gfx9.resource_type == RADEON_RESOURCE_3D)
3860 return 1u << 31; /* reject 3D textures by returning an impossible alignment */
3861
3862 unsigned bpe_log2 = util_logbase2(surf->bpe);
3863 unsigned block_size_log2;
3864
3865 switch ((surf->u.gfx9.swizzle_mode & ~3) + 3) {
3866 case ADDR_SW_256B_R:
3867 block_size_log2 = 8;
3868 break;
3869 case ADDR_SW_4KB_R:
3870 case ADDR_SW_4KB_R_X:
3871 block_size_log2 = 12;
3872 break;
3873 case ADDR_SW_64KB_R:
3874 case ADDR_SW_64KB_R_T:
3875 case ADDR_SW_64KB_R_X:
3876 block_size_log2 = 16;
3877 break;
3878 case ADDR_SW_256KB_R_X:
3879 block_size_log2 = 18;
3880 break;
3881 default:
3882 unreachable("unhandled swizzle mode");
3883 }
3884
3885 if (info->gfx_level >= GFX10) {
3886 return 1 << (((block_size_log2 - bpe_log2) + 1) / 2);
3887 } else {
3888 static unsigned block_256B_width[] = {16, 16, 8, 8, 4};
3889 return block_256B_width[bpe_log2] << ((block_size_log2 - 8) / 2);
3890 }
3891 } else {
3892 unsigned mode;
3893
3894 if ((surf->flags & RADEON_SURF_Z_OR_SBUFFER) == RADEON_SURF_SBUFFER)
3895 mode = surf->u.legacy.zs.stencil_level[0].mode;
3896 else
3897 mode = surf->u.legacy.level[0].mode;
3898
3899 /* Note that display usage requires an alignment of 32 pixels (see AdjustPitchAlignment),
3900 * which is not checked here.
3901 */
3902 switch (mode) {
3903 case RADEON_SURF_MODE_1D:
3904 return 8;
3905 case RADEON_SURF_MODE_2D:
3906 return 8 * surf->u.legacy.bankw * surf->u.legacy.mtilea *
3907 ac_pipe_config_to_num_pipes(surf->u.legacy.pipe_config);
3908 default:
3909 unreachable("unhandled surf mode");
3910 }
3911 }
3912 }
3913
3914 bool ac_surface_override_offset_stride(const struct radeon_info *info, struct radeon_surf *surf,
3915 unsigned num_layers, unsigned num_mipmap_levels,
3916 uint64_t offset, unsigned pitch)
3917 {
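/* The pitch must be a multiple of the required alignment (always a power of two). */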
3918 if ((ac_surface_get_pitch_align(info, surf) - 1) & pitch)
3919 return false;
3920
3921 /* Require an equal pitch with metadata (DCC), mipmapping, non-linear layout (that could be
3922 * relaxed), or when the chip is GFX10, which is the only generation that can't override
3923 * the pitch.
3924 */
3925 bool require_equal_pitch = surf->surf_size != surf->total_size ||
3926 num_layers != 1 ||
3927 num_mipmap_levels != 1 ||
3928 (info->gfx_level >= GFX9 && !surf->is_linear) ||
3929 info->gfx_level == GFX10;
3930
3931 if (info->gfx_level >= GFX9) {
3932 if (pitch) {
3933 if (surf->u.gfx9.surf_pitch != pitch && require_equal_pitch)
3934 return false;
3935
3936 if (pitch != surf->u.gfx9.surf_pitch) {
3937 unsigned slices = surf->surf_size / surf->u.gfx9.surf_slice_size;
3938
3939 surf->u.gfx9.uses_custom_pitch = true;
3940 surf->u.gfx9.surf_pitch = pitch;
3941 surf->u.gfx9.epitch = pitch - 1;
3942 surf->u.gfx9.pitch[0] = pitch;
3943 surf->u.gfx9.surf_slice_size = (uint64_t)pitch * surf->u.gfx9.surf_height * surf->bpe;
3944 surf->total_size = surf->surf_size = surf->u.gfx9.surf_slice_size * slices;
3945 }
3946 }
3947
3948 surf->u.gfx9.surf_offset = offset;
3949 if (surf->has_stencil)
3950 surf->u.gfx9.zs.stencil_offset += offset;
3951 } else {
3952 if (pitch) {
3953 if (surf->u.legacy.level[0].nblk_x != pitch && require_equal_pitch)
3954 return false;
3955
3956 surf->u.legacy.level[0].nblk_x = pitch;
3957 surf->u.legacy.level[0].slice_size_dw =
3958 ((uint64_t)pitch * surf->u.legacy.level[0].nblk_y * surf->bpe) / 4;
3959 }
3960
3961 if (offset) {
3962 for (unsigned i = 0; i < ARRAY_SIZE(surf->u.legacy.level); ++i)
3963 surf->u.legacy.level[i].offset_256B += offset / 256;
3964 }
3965 }
3966
3967 if (offset & ((1 << surf->alignment_log2) - 1) ||
3968 offset >= UINT64_MAX - surf->total_size)
3969 return false;
3970
3971 if (surf->meta_offset)
3972 surf->meta_offset += offset;
3973 if (surf->fmask_offset)
3974 surf->fmask_offset += offset;
3975 if (surf->cmask_offset)
3976 surf->cmask_offset += offset;
3977 if (surf->display_dcc_offset)
3978 surf->display_dcc_offset += offset;
3979 return true;
3980 }
3981
3982 unsigned ac_surface_get_nplanes(const struct radeon_surf *surf)
3983 {
3984 if (surf->modifier == DRM_FORMAT_MOD_INVALID)
3985 return 1;
3986 else if (surf->display_dcc_offset)
3987 return 3;
3988 else if (surf->meta_offset)
3989 return 2;
3990 else
3991 return 1;
3992 }
3993
3994 uint64_t ac_surface_get_plane_offset(enum amd_gfx_level gfx_level,
3995 const struct radeon_surf *surf,
3996 unsigned plane, unsigned layer)
3997 {
3998 switch (plane) {
3999 case 0:
4000 if (gfx_level >= GFX9) {
4001 return surf->u.gfx9.surf_offset +
4002 layer * surf->u.gfx9.surf_slice_size;
4003 } else {
4004 return (uint64_t)surf->u.legacy.level[0].offset_256B * 256 +
4005 layer * (uint64_t)surf->u.legacy.level[0].slice_size_dw * 4;
4006 }
4007 case 1:
4008 assert(!layer);
4009 return surf->display_dcc_offset ?
4010 surf->display_dcc_offset : surf->meta_offset;
4011 case 2:
4012 assert(!layer);
4013 return surf->meta_offset;
4014 default:
4015 unreachable("Invalid plane index");
4016 }
4017 }
4018
4019 uint64_t ac_surface_get_plane_stride(enum amd_gfx_level gfx_level,
4020 const struct radeon_surf *surf,
4021 unsigned plane, unsigned level)
4022 {
4023 switch (plane) {
4024 case 0:
4025 if (gfx_level >= GFX9) {
4026 return (surf->is_linear ? surf->u.gfx9.pitch[level] : surf->u.gfx9.surf_pitch) * surf->bpe;
4027 } else {
4028 return surf->u.legacy.level[level].nblk_x * surf->bpe;
4029 }
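/* The *_pitch_max fields store (pitch - 1), hence the +1 below. */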
4030 case 1:
4031 return 1 + (surf->display_dcc_offset ?
4032 surf->u.gfx9.color.display_dcc_pitch_max : surf->u.gfx9.color.dcc_pitch_max);
4033 case 2:
4034 return surf->u.gfx9.color.dcc_pitch_max + 1;
4035 default:
4036 unreachable("Invalid plane index");
4037 }
4038 }
4039
4040 uint64_t ac_surface_get_plane_size(const struct radeon_surf *surf,
4041 unsigned plane)
4042 {
4043 switch (plane) {
4044 case 0:
4045 return surf->surf_size;
4046 case 1:
4047 return surf->display_dcc_offset ?
4048 surf->u.gfx9.color.display_dcc_size : surf->meta_size;
4049 case 2:
4050 return surf->meta_size;
4051 default:
4052 unreachable("Invalid plane index");
4053 }
4054 }
4055
4056 uint64_t
4057 ac_surface_addr_from_coord(struct ac_addrlib *addrlib, const struct radeon_info *info,
4058 const struct radeon_surf *surf, const struct ac_surf_info *surf_info,
4059 unsigned level, unsigned x, unsigned y, unsigned layer, bool is_3d)
4060 {
4061 /* Only implemented for GFX9+ */
4062 assert(info->gfx_level >= GFX9);
4063
4064 ADDR2_COMPUTE_SURFACE_ADDRFROMCOORD_INPUT input = {0};
4065 input.size = sizeof(ADDR2_COMPUTE_SURFACE_ADDRFROMCOORD_INPUT);
4066 input.slice = layer;
4067 input.mipId = level;
4068 input.unalignedWidth = DIV_ROUND_UP(surf_info->width, surf->blk_w);
4069 input.unalignedHeight = DIV_ROUND_UP(surf_info->height, surf->blk_h);
4070 input.numSlices = is_3d ? surf_info->depth : surf_info->array_size;
4071 input.numMipLevels = surf_info->levels;
4072 input.numSamples = surf_info->samples;
4073 input.numFrags = surf_info->samples;
4074 input.swizzleMode = surf->u.gfx9.swizzle_mode;
4075 input.resourceType = (AddrResourceType)surf->u.gfx9.resource_type;
4076 input.pipeBankXor = surf->tile_swizzle;
4077 input.bpp = surf->bpe * 8;
4078 input.x = x;
4079 input.y = y;
4080
4081 ADDR2_COMPUTE_SURFACE_ADDRFROMCOORD_OUTPUT output = {0};
4082 output.size = sizeof(ADDR2_COMPUTE_SURFACE_ADDRFROMCOORD_OUTPUT);
4083 Addr2ComputeSurfaceAddrFromCoord(addrlib->handle, &input, &output);
4084 return output.addr;
4085 }
4086
4087 static void
4088 gfx12_surface_compute_nbc_view(struct ac_addrlib *addrlib, const struct radeon_info *info,
4089 const struct radeon_surf *surf, const struct ac_surf_info *surf_info,
4090 unsigned level, unsigned layer, struct ac_surf_nbc_view *out)
4091 {
4092 ADDR3_COMPUTE_NONBLOCKCOMPRESSEDVIEW_INPUT input = {0};
4093 input.size = sizeof(ADDR3_COMPUTE_NONBLOCKCOMPRESSEDVIEW_INPUT);
4094 input.swizzleMode = surf->u.gfx9.swizzle_mode;
4095 input.resourceType = (AddrResourceType)surf->u.gfx9.resource_type;
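/* Pick a representative block-compressed format purely by bytes per block:
 * 8 bytes -> BC1, 16 bytes -> BC3.
 */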
4096 switch (surf->bpe) {
4097 case 8:
4098 input.format = ADDR_FMT_BC1;
4099 break;
4100 case 16:
4101 input.format = ADDR_FMT_BC3;
4102 break;
4103 default:
4104 assert(0);
4105 }
4106 input.unAlignedDims.width = surf_info->width;
4107 input.unAlignedDims.height = surf_info->height;
4108 input.numMipLevels = surf_info->levels;
4109 input.pipeBankXor = surf->tile_swizzle;
4110 input.slice = layer;
4111 input.mipId = level;
4112
4113 ADDR_E_RETURNCODE res;
4114 ADDR3_COMPUTE_NONBLOCKCOMPRESSEDVIEW_OUTPUT output = {0};
4115 output.size = sizeof(ADDR3_COMPUTE_NONBLOCKCOMPRESSEDVIEW_OUTPUT);
4116 res = Addr3ComputeNonBlockCompressedView(addrlib->handle, &input, &output);
4117 if (res == ADDR_OK) {
4118 out->base_address_offset = output.offset;
4119 out->tile_swizzle = output.pipeBankXor;
4120 out->width = output.unAlignedDims.width;
4121 out->height = output.unAlignedDims.height;
4122 out->num_levels = output.numMipLevels;
4123 out->level = output.mipId;
4124 out->valid = true;
4125 } else {
4126 out->valid = false;
4127 }
4128 }
4129
4130 static void
4131 gfx10_surface_compute_nbc_view(struct ac_addrlib *addrlib, const struct radeon_info *info,
4132 const struct radeon_surf *surf, const struct ac_surf_info *surf_info,
4133 unsigned level, unsigned layer, struct ac_surf_nbc_view *out)
4134 {
4135 ADDR2_COMPUTE_NONBLOCKCOMPRESSEDVIEW_INPUT input = {0};
4136 input.size = sizeof(ADDR2_COMPUTE_NONBLOCKCOMPRESSEDVIEW_INPUT);
4137 input.swizzleMode = surf->u.gfx9.swizzle_mode;
4138 input.resourceType = (AddrResourceType)surf->u.gfx9.resource_type;
4139 switch (surf->bpe) {
4140 case 8:
4141 input.format = ADDR_FMT_BC1;
4142 break;
4143 case 16:
4144 input.format = ADDR_FMT_BC3;
4145 break;
4146 default:
4147 assert(0);
4148 }
4149 input.width = surf_info->width;
4150 input.height = surf_info->height;
4151 input.numSlices = surf_info->array_size;
4152 input.numMipLevels = surf_info->levels;
4153 input.pipeBankXor = surf->tile_swizzle;
4154 input.slice = layer;
4155 input.mipId = level;
4156
4157 ADDR_E_RETURNCODE res;
4158 ADDR2_COMPUTE_NONBLOCKCOMPRESSEDVIEW_OUTPUT output = {0};
4159 output.size = sizeof(ADDR2_COMPUTE_NONBLOCKCOMPRESSEDVIEW_OUTPUT);
4160 res = Addr2ComputeNonBlockCompressedView(addrlib->handle, &input, &output);
4161 if (res == ADDR_OK) {
4162 out->base_address_offset = output.offset;
4163 out->tile_swizzle = output.pipeBankXor;
4164 out->width = output.unalignedWidth;
4165 out->height = output.unalignedHeight;
4166 out->num_levels = output.numMipLevels;
4167 out->level = output.mipId;
4168 out->valid = true;
4169 } else {
4170 out->valid = false;
4171 }
4172 }
4173
4174 void
4175 ac_surface_compute_nbc_view(struct ac_addrlib *addrlib, const struct radeon_info *info,
4176 const struct radeon_surf *surf, const struct ac_surf_info *surf_info,
4177 unsigned level, unsigned layer, struct ac_surf_nbc_view *out)
4178 {
4179 /* Only implemented for GFX10+ */
4180 assert(info->gfx_level >= GFX10);
4181
4182 if (info->gfx_level >= GFX12) {
4183 gfx12_surface_compute_nbc_view(addrlib, info, surf, surf_info, level, layer, out);
4184 } else {
4185 gfx10_surface_compute_nbc_view(addrlib, info, surf, surf_info, level, layer, out);
4186 }
4187 }
4188
4189 void ac_surface_print_info(FILE *out, const struct radeon_info *info,
4190 const struct radeon_surf *surf)
4191 {
4192 if (info->gfx_level >= GFX9) {
4193 fprintf(out,
4194 " Surf: size=%" PRIu64 ", slice_size=%" PRIu64 ", "
4195 "alignment=%u, swmode=%u, tile_swizzle=%u, epitch=%u, pitch=%u, blk_w=%u, "
4196 "blk_h=%u, bpe=%u, flags=0x%"PRIx64"\n",
4197 surf->surf_size, surf->u.gfx9.surf_slice_size,
4198 1 << surf->surf_alignment_log2, surf->u.gfx9.swizzle_mode, surf->tile_swizzle,
4199 surf->u.gfx9.epitch, surf->u.gfx9.surf_pitch,
4200 surf->blk_w, surf->blk_h, surf->bpe, surf->flags);
4201
4202 if (surf->fmask_offset)
4203 fprintf(out,
4204 " FMask: offset=%" PRIu64 ", size=%" PRIu64 ", "
4205 "alignment=%u, swmode=%u, epitch=%u\n",
4206 surf->fmask_offset, surf->fmask_size,
4207 1 << surf->fmask_alignment_log2, surf->u.gfx9.color.fmask_swizzle_mode,
4208 surf->u.gfx9.color.fmask_epitch);
4209
4210 if (surf->cmask_offset)
4211 fprintf(out,
4212 " CMask: offset=%" PRIu64 ", size=%u, "
4213 "alignment=%u\n",
4214 surf->cmask_offset, surf->cmask_size,
4215 1 << surf->cmask_alignment_log2);
4216
4217 if (surf->flags & RADEON_SURF_Z_OR_SBUFFER && surf->meta_offset)
4218 fprintf(out,
4219 " HTile: offset=%" PRIu64 ", size=%u, alignment=%u\n",
4220 surf->meta_offset, surf->meta_size,
4221 1 << surf->meta_alignment_log2);
4222
4223 if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER) && surf->meta_offset)
4224 fprintf(out,
4225 " DCC: offset=%" PRIu64 ", size=%u, "
4226 "alignment=%u, pitch_max=%u, num_dcc_levels=%u\n",
4227 surf->meta_offset, surf->meta_size, 1 << surf->meta_alignment_log2,
4228 surf->u.gfx9.color.display_dcc_pitch_max, surf->num_meta_levels);
4229
4230 if (surf->has_stencil)
4231 fprintf(out,
4232 " Stencil: offset=%" PRIu64 ", swmode=%u, epitch=%u\n",
4233 surf->u.gfx9.zs.stencil_offset,
4234 surf->u.gfx9.zs.stencil_swizzle_mode,
4235 surf->u.gfx9.zs.stencil_epitch);
4236
4237 if (info->gfx_level == GFX12) {
4238 if (surf->u.gfx9.zs.hiz.size) {
4239 fprintf(out,
4240 " HiZ: offset=%" PRIu64 ", size=%u, swmode=%u, width_in_tiles=%u, height_in_tiles=%u\n",
4241 surf->u.gfx9.zs.hiz.offset, surf->u.gfx9.zs.hiz.size, surf->u.gfx9.zs.hiz.swizzle_mode,
4242 surf->u.gfx9.zs.hiz.width_in_tiles, surf->u.gfx9.zs.hiz.height_in_tiles);
4243 }
4244
4245 if (surf->u.gfx9.zs.his.size) {
4246 fprintf(out,
4247 " HiS: offset=%" PRIu64 ", size=%u, swmode=%u, width_in_tiles=%u, height_in_tiles=%u\n",
4248 surf->u.gfx9.zs.his.offset, surf->u.gfx9.zs.his.size, surf->u.gfx9.zs.his.swizzle_mode,
4249 surf->u.gfx9.zs.his.width_in_tiles, surf->u.gfx9.zs.his.height_in_tiles);
4250 }
4251 }
4252 } else {
4253 fprintf(out,
4254 " Surf: size=%" PRIu64 ", alignment=%u, blk_w=%u, blk_h=%u, "
4255 "bpe=%u, flags=0x%"PRIx64"\n",
4256 surf->surf_size, 1 << surf->surf_alignment_log2, surf->blk_w,
4257 surf->blk_h, surf->bpe, surf->flags);
4258
4259 fprintf(out,
4260 " Layout: size=%" PRIu64 ", alignment=%u, bankw=%u, bankh=%u, "
4261 "nbanks=%u, mtilea=%u, tilesplit=%u, pipeconfig=%u, scanout=%u\n",
4262 surf->surf_size, 1 << surf->surf_alignment_log2,
4263 surf->u.legacy.bankw, surf->u.legacy.bankh,
4264 surf->u.legacy.num_banks, surf->u.legacy.mtilea,
4265 surf->u.legacy.tile_split, surf->u.legacy.pipe_config,
4266 (surf->flags & RADEON_SURF_SCANOUT) != 0);
4267
4268 if (surf->fmask_offset)
4269 fprintf(out,
4270 " FMask: offset=%" PRIu64 ", size=%" PRIu64 ", "
4271 "alignment=%u, pitch_in_pixels=%u, bankh=%u, "
4272 "slice_tile_max=%u, tile_mode_index=%u\n",
4273 surf->fmask_offset, surf->fmask_size,
4274 1 << surf->fmask_alignment_log2, surf->u.legacy.color.fmask.pitch_in_pixels,
4275 surf->u.legacy.color.fmask.bankh,
4276 surf->u.legacy.color.fmask.slice_tile_max,
4277 surf->u.legacy.color.fmask.tiling_index);
4278
4279 if (surf->cmask_offset)
4280 fprintf(out,
4281 " CMask: offset=%" PRIu64 ", size=%u, alignment=%u, "
4282 "slice_tile_max=%u\n",
4283 surf->cmask_offset, surf->cmask_size,
4284 1 << surf->cmask_alignment_log2, surf->u.legacy.color.cmask_slice_tile_max);
4285
4286 if (surf->flags & RADEON_SURF_Z_OR_SBUFFER && surf->meta_offset)
4287 fprintf(out, " HTile: offset=%" PRIu64 ", size=%u, alignment=%u\n",
4288 surf->meta_offset, surf->meta_size,
4289 1 << surf->meta_alignment_log2);
4290
4291 if (!(surf->flags & RADEON_SURF_Z_OR_SBUFFER) && surf->meta_offset)
4292 fprintf(out, " DCC: offset=%" PRIu64 ", size=%u, alignment=%u\n",
4293 surf->meta_offset, surf->meta_size, 1 << surf->meta_alignment_log2);
4294
4295 if (surf->has_stencil)
4296 fprintf(out, " StencilLayout: tilesplit=%u\n",
4297 surf->u.legacy.stencil_tile_split);
4298 }
4299 }
4300
4301 static nir_def *gfx10_nir_meta_addr_from_coord(nir_builder *b, const struct radeon_info *info,
4302 const struct gfx9_meta_equation *equation,
4303 int blkSizeBias, unsigned blkStart,
4304 nir_def *meta_pitch, nir_def *meta_slice_size,
4305 nir_def *x, nir_def *y, nir_def *z,
4306 nir_def *pipe_xor,
4307 nir_def **bit_position)
4308 {
4309 nir_def *zero = nir_imm_int(b, 0);
4310 nir_def *one = nir_imm_int(b, 1);
4311
4312 assert(info->gfx_level >= GFX10);
4313
4314 unsigned meta_block_width_log2 = util_logbase2(equation->meta_block_width);
4315 unsigned meta_block_height_log2 = util_logbase2(equation->meta_block_height);
4316 unsigned blkSizeLog2 = meta_block_width_log2 + meta_block_height_log2 + blkSizeBias;
4317
4318 nir_def *coord[] = {x, y, z, 0};
4319 nir_def *address = zero;
4320
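/* Each low address bit is the XOR of the coordinate bits selected by the meta equation
 * for that bit index.
 */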
4321 for (unsigned i = blkStart; i < blkSizeLog2 + 1; i++) {
4322 nir_def *v = zero;
4323
4324 for (unsigned c = 0; c < 4; c++) {
4325 unsigned index = i * 4 + c - (blkStart * 4);
4326 if (equation->u.gfx10_bits[index]) {
4327 unsigned mask = equation->u.gfx10_bits[index];
4328 nir_def *bits = coord[c];
4329
4330 while (mask)
4331 v = nir_ixor(b, v, nir_iand(b, nir_ushr_imm(b, bits, u_bit_scan(&mask)), one));
4332 }
4333 }
4334
4335 address = nir_ior(b, address, nir_ishl_imm(b, v, i));
4336 }
4337
4338 unsigned blkMask = (1 << blkSizeLog2) - 1;
4339 unsigned pipeMask = (1 << G_0098F8_NUM_PIPES(info->gb_addr_config)) - 1;
4340 unsigned m_pipeInterleaveLog2 = 8 + G_0098F8_PIPE_INTERLEAVE_SIZE_GFX9(info->gb_addr_config);
4341 nir_def *xb = nir_ushr_imm(b, x, meta_block_width_log2);
4342 nir_def *yb = nir_ushr_imm(b, y, meta_block_height_log2);
4343 nir_def *pb = nir_ushr_imm(b, meta_pitch, meta_block_width_log2);
4344 nir_def *blkIndex = nir_iadd(b, nir_imul(b, yb, pb), xb);
4345 nir_def *pipeXor = nir_iand_imm(b, nir_ishl_imm(b, nir_iand_imm(b, pipe_xor, pipeMask),
4346 m_pipeInterleaveLog2), blkMask);
4347
4348 if (bit_position)
4349 *bit_position = nir_ishl_imm(b, nir_iand_imm(b, address, 1), 2);
4350
4351 return nir_iadd(b, nir_iadd(b, nir_imul(b, meta_slice_size, z),
4352 nir_imul(b, blkIndex, nir_ishl_imm(b, one, blkSizeLog2))),
4353 nir_ixor(b, nir_ushr(b, address, one), pipeXor));
4354 }
4355
4356 static nir_def *gfx9_nir_meta_addr_from_coord(nir_builder *b, const struct radeon_info *info,
4357 const struct gfx9_meta_equation *equation,
4358 nir_def *meta_pitch, nir_def *meta_height,
4359 nir_def *x, nir_def *y, nir_def *z,
4360 nir_def *sample, nir_def *pipe_xor,
4361 nir_def **bit_position)
4362 {
4363 nir_def *zero = nir_imm_int(b, 0);
4364 nir_def *one = nir_imm_int(b, 1);
4365
4366 assert(info->gfx_level >= GFX9);
4367
4368 unsigned meta_block_width_log2 = util_logbase2(equation->meta_block_width);
4369 unsigned meta_block_height_log2 = util_logbase2(equation->meta_block_height);
4370 unsigned meta_block_depth_log2 = util_logbase2(equation->meta_block_depth);
4371
4372 unsigned m_pipeInterleaveLog2 = 8 + G_0098F8_PIPE_INTERLEAVE_SIZE_GFX9(info->gb_addr_config);
4373 unsigned numPipeBits = equation->u.gfx9.num_pipe_bits;
4374 nir_def *pitchInBlock = nir_ushr_imm(b, meta_pitch, meta_block_width_log2);
4375 nir_def *sliceSizeInBlock = nir_imul(b, nir_ushr_imm(b, meta_height, meta_block_height_log2),
4376 pitchInBlock);
4377
4378 nir_def *xb = nir_ushr_imm(b, x, meta_block_width_log2);
4379 nir_def *yb = nir_ushr_imm(b, y, meta_block_height_log2);
4380 nir_def *zb = nir_ushr_imm(b, z, meta_block_depth_log2);
4381
4382 nir_def *blockIndex = nir_iadd(b, nir_iadd(b, nir_imul(b, zb, sliceSizeInBlock),
4383 nir_imul(b, yb, pitchInBlock)), xb);
4384 nir_def *coords[] = {x, y, z, sample, blockIndex};
4385
4386 nir_def *address = zero;
4387 unsigned num_bits = equation->u.gfx9.num_bits;
4388 assert(num_bits <= 32);
4389
4390 /* Compute the address up until the last bit that doesn't use the block index. */
4391 for (unsigned i = 0; i < num_bits - 1; i++) {
4392 nir_def *xor = zero;
4393
4394 for (unsigned c = 0; c < 5; c++) {
4395 if (equation->u.gfx9.bit[i].coord[c].dim >= 5)
4396 continue;
4397
4398 assert(equation->u.gfx9.bit[i].coord[c].ord < 32);
4399 nir_def *ison =
4400 nir_iand(b, nir_ushr_imm(b, coords[equation->u.gfx9.bit[i].coord[c].dim],
4401 equation->u.gfx9.bit[i].coord[c].ord), one);
4402
4403 xor = nir_ixor(b, xor, ison);
4404 }
4405 address = nir_ior(b, address, nir_ishl_imm(b, xor, i));
4406 }
4407
4408 /* Fill the remaining bits with the block index. */
4409 unsigned last = num_bits - 1;
4410 address = nir_ior(b, address,
4411 nir_ishl_imm(b, nir_ushr_imm(b, blockIndex,
4412 equation->u.gfx9.bit[last].coord[0].ord),
4413 last));
4414
4415 if (bit_position)
4416 *bit_position = nir_ishl_imm(b, nir_iand_imm(b, address, 1), 2);
4417
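/* The LSB of "address" selects the nibble within a byte (returned via bit_position above);
 * shift it out to get the byte offset and XOR in the pipe bits above the pipe interleave.
 */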
4418 nir_def *pipeXor = nir_iand_imm(b, pipe_xor, (1 << numPipeBits) - 1);
4419 return nir_ixor(b, nir_ushr(b, address, one),
4420 nir_ishl_imm(b, pipeXor, m_pipeInterleaveLog2));
4421 }
4422
4423 nir_def *ac_nir_dcc_addr_from_coord(nir_builder *b, const struct radeon_info *info,
4424 unsigned bpe, const struct gfx9_meta_equation *equation,
4425 nir_def *dcc_pitch, nir_def *dcc_height,
4426 nir_def *dcc_slice_size,
4427 nir_def *x, nir_def *y, nir_def *z,
4428 nir_def *sample, nir_def *pipe_xor)
4429 {
4430 if (info->gfx_level >= GFX10) {
4431 unsigned bpp_log2 = util_logbase2(bpe);
4432
4433 return gfx10_nir_meta_addr_from_coord(b, info, equation, bpp_log2 - 8, 1,
4434 dcc_pitch, dcc_slice_size,
4435 x, y, z, pipe_xor, NULL);
4436 } else {
4437 return gfx9_nir_meta_addr_from_coord(b, info, equation, dcc_pitch,
4438 dcc_height, x, y, z,
4439 sample, pipe_xor, NULL);
4440 }
4441 }
4442
4443 nir_def *ac_nir_cmask_addr_from_coord(nir_builder *b, const struct radeon_info *info,
4444 const struct gfx9_meta_equation *equation,
4445 nir_def *cmask_pitch, nir_def *cmask_height,
4446 nir_def *cmask_slice_size,
4447 nir_def *x, nir_def *y, nir_def *z,
4448 nir_def *pipe_xor,
4449 nir_def **bit_position)
4450 {
4451 nir_def *zero = nir_imm_int(b, 0);
4452
4453 if (info->gfx_level >= GFX10) {
4454 return gfx10_nir_meta_addr_from_coord(b, info, equation, -7, 1,
4455 cmask_pitch, cmask_slice_size,
4456 x, y, z, pipe_xor, bit_position);
4457 } else {
4458 return gfx9_nir_meta_addr_from_coord(b, info, equation, cmask_pitch,
4459 cmask_height, x, y, z, zero,
4460 pipe_xor, bit_position);
4461 }
4462 }
4463
4464 nir_def *ac_nir_htile_addr_from_coord(nir_builder *b, const struct radeon_info *info,
4465 const struct gfx9_meta_equation *equation,
4466 nir_def *htile_pitch,
4467 nir_def *htile_slice_size,
4468 nir_def *x, nir_def *y, nir_def *z,
4469 nir_def *pipe_xor)
4470 {
4471 return gfx10_nir_meta_addr_from_coord(b, info, equation, -4, 2,
4472 htile_pitch, htile_slice_size,
4473 x, y, z, pipe_xor, NULL);
4474 }
4475