/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_pipe.h"
#include "sid.h"
#include "util/format/u_format.h"
#include "util/u_pack_color.h"
#include "util/u_surface.h"

enum
{
   SI_CLEAR = SI_SAVE_FRAGMENT_STATE,
   SI_CLEAR_SURFACE = SI_SAVE_FRAMEBUFFER | SI_SAVE_FRAGMENT_STATE,
};

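/* Allocate a standalone CMASK buffer for a single-sample texture so that it
 * can be fast-cleared. MSAA textures are skipped here because their CMASK is
 * either allocated in advance or disabled.
 */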
static void si_alloc_separate_cmask(struct si_screen *sscreen, struct si_texture *tex)
{
   /* CMASK for MSAA is allocated in advance or always disabled
    * by "nofmask" option.
    */
   if (tex->cmask_buffer || !tex->surface.cmask_size || tex->buffer.b.b.nr_samples >= 2)
      return;

   tex->cmask_buffer =
      si_aligned_buffer_create(&sscreen->b, SI_RESOURCE_FLAG_UNMAPPABLE, PIPE_USAGE_DEFAULT,
                               tex->surface.cmask_size, tex->surface.cmask_alignment);
   if (tex->cmask_buffer == NULL)
      return;

   tex->cmask_base_address_reg = tex->cmask_buffer->gpu_address >> 8;
   tex->cb_color_info |= S_028C70_FAST_CLEAR(1);

   p_atomic_inc(&sscreen->compressed_colortex_counter);
}

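/* Pack the clear color into the layout expected by the CB clear color
 * registers and store it in the texture. Return true if the stored value
 * changed, so the caller knows the framebuffer state must be re-emitted.
 */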
static bool si_set_clear_color(struct si_texture *tex, enum pipe_format surface_format,
                               const union pipe_color_union *color)
{
   union util_color uc;

   memset(&uc, 0, sizeof(uc));

   if (tex->surface.bpe == 16) {
      /* DCC fast clear only:
       * CLEAR_WORD0 = R = G = B
       * CLEAR_WORD1 = A
       */
      assert(color->ui[0] == color->ui[1] && color->ui[0] == color->ui[2]);
      uc.ui[0] = color->ui[0];
      uc.ui[1] = color->ui[3];
   } else {
      util_pack_color_union(surface_format, &uc, color);
   }

   if (memcmp(tex->color_clear_value, &uc, 2 * sizeof(uint32_t)) == 0)
      return false;

   memcpy(tex->color_clear_value, &uc, 2 * sizeof(uint32_t));
   return true;
}

/** Linearize and convert luminance/intensity to red. */
enum pipe_format si_simplify_cb_format(enum pipe_format format)
{
   format = util_format_linear(format);
   format = util_format_luminance_to_red(format);
   return util_format_intensity_to_red(format);
}

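/* Return whether the alpha channel ends up in the most significant component
 * of the packed color for this format. vi_get_fast_clear_parameters uses this
 * to locate the alpha component when picking a DCC clear code.
 */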
bool vi_alpha_is_on_msb(struct si_screen *sscreen, enum pipe_format format)
{
   format = si_simplify_cb_format(format);
   const struct util_format_description *desc = util_format_description(format);

   /* Formats with 3 channels can't have alpha. */
   if (desc->nr_channels == 3)
      return true; /* same as xxxA; is any value OK here? */

   if (sscreen->info.chip_class >= GFX10 && desc->nr_channels == 1)
      return desc->swizzle[3] == PIPE_SWIZZLE_X;

   return si_translate_colorswap(format, false) <= 1;
}

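/* Compute the DCC clear code for the given clear color. Return false if a DCC
 * fast clear isn't possible at all. *eliminate_needed is set when the clear
 * color can't be expressed by one of the constant clear codes (every channel
 * 0 or 1), in which case a fast clear eliminate pass is required later.
 */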
static bool vi_get_fast_clear_parameters(struct si_screen *sscreen, enum pipe_format base_format,
                                         enum pipe_format surface_format,
                                         const union pipe_color_union *color, uint32_t *clear_value,
                                         bool *eliminate_needed)
{
   /* If we want to clear without needing a fast clear eliminate step, we
    * can set color and alpha independently to 0 or 1 (or 0/max for integer
    * formats).
    */
   bool values[4] = {};      /* whether to clear to 0 or 1 */
   bool color_value = false; /* clear color to 0 or 1 */
   bool alpha_value = false; /* clear alpha to 0 or 1 */
   int alpha_channel;        /* index of the alpha component */
   bool has_color = false;
   bool has_alpha = false;

   const struct util_format_description *desc =
      util_format_description(si_simplify_cb_format(surface_format));

   /* 128-bit fast clear with different R,G,B values is unsupported. */
   if (desc->block.bits == 128 && (color->ui[0] != color->ui[1] || color->ui[0] != color->ui[2]))
      return false;

   *eliminate_needed = true;
   *clear_value = DCC_CLEAR_COLOR_REG;

   if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN)
      return true; /* need ELIMINATE_FAST_CLEAR */

   bool base_alpha_is_on_msb = vi_alpha_is_on_msb(sscreen, base_format);
   bool surf_alpha_is_on_msb = vi_alpha_is_on_msb(sscreen, surface_format);

   /* Formats with 3 channels can't have alpha. */
   if (desc->nr_channels == 3)
      alpha_channel = -1;
   else if (surf_alpha_is_on_msb)
      alpha_channel = desc->nr_channels - 1;
   else
      alpha_channel = 0;

   for (int i = 0; i < 4; ++i) {
      if (desc->swizzle[i] >= PIPE_SWIZZLE_0)
         continue;

      if (desc->channel[i].pure_integer && desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
         /* Use the maximum value for clamping the clear color. */
         int max = u_bit_consecutive(0, desc->channel[i].size - 1);

         values[i] = color->i[i] != 0;
         if (color->i[i] != 0 && MIN2(color->i[i], max) != max)
            return true; /* need ELIMINATE_FAST_CLEAR */
      } else if (desc->channel[i].pure_integer &&
                 desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED) {
         /* Use the maximum value for clamping the clear color. */
         unsigned max = u_bit_consecutive(0, desc->channel[i].size);

         values[i] = color->ui[i] != 0U;
         if (color->ui[i] != 0U && MIN2(color->ui[i], max) != max)
            return true; /* need ELIMINATE_FAST_CLEAR */
      } else {
         values[i] = color->f[i] != 0.0F;
         if (color->f[i] != 0.0F && color->f[i] != 1.0F)
            return true; /* need ELIMINATE_FAST_CLEAR */
      }

      if (desc->swizzle[i] == alpha_channel) {
         alpha_value = values[i];
         has_alpha = true;
      } else {
         color_value = values[i];
         has_color = true;
      }
   }

   /* If alpha isn't present, make it the same as color, and vice versa. */
   if (!has_alpha)
      alpha_value = color_value;
   else if (!has_color)
      color_value = alpha_value;

   if (color_value != alpha_value && base_alpha_is_on_msb != surf_alpha_is_on_msb)
      return true; /* require ELIMINATE_FAST_CLEAR */

   /* Check if all color values are equal if they are present. */
   for (int i = 0; i < 4; ++i) {
      if (desc->swizzle[i] <= PIPE_SWIZZLE_W && desc->swizzle[i] != alpha_channel &&
          values[i] != color_value)
         return true; /* require ELIMINATE_FAST_CLEAR */
   }

   /* This doesn't need ELIMINATE_FAST_CLEAR.
    * On chips predating Raven2, the DCC clear codes and the CB clear
    * color registers must match.
    */
   *eliminate_needed = false;

   if (color_value) {
      if (alpha_value)
         *clear_value = DCC_CLEAR_COLOR_1111;
      else
         *clear_value = DCC_CLEAR_COLOR_1110;
   } else {
      if (alpha_value)
         *clear_value = DCC_CLEAR_COLOR_0001;
      else
         *clear_value = DCC_CLEAR_COLOR_0000;
   }
   return true;
}

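/* Clear the DCC metadata of one mip level to the given clear code. Return
 * false if the clear isn't supported for this texture, e.g. mipmapped
 * textures on GFX9+ or MSAA cases that would need a compute shader.
 */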
bool vi_dcc_clear_level(struct si_context *sctx, struct si_texture *tex, unsigned level,
                        unsigned clear_value)
{
   struct pipe_resource *dcc_buffer;
   uint64_t dcc_offset, clear_size;

   assert(vi_dcc_enabled(tex, level));

   if (tex->dcc_separate_buffer) {
      dcc_buffer = &tex->dcc_separate_buffer->b.b;
      dcc_offset = 0;
   } else {
      dcc_buffer = &tex->buffer.b.b;
      dcc_offset = tex->surface.dcc_offset;
   }

   if (sctx->chip_class >= GFX9) {
      /* Mipmap level clears aren't implemented. */
      if (tex->buffer.b.b.last_level > 0)
         return false;

      /* 4x and 8x MSAA need a sophisticated compute shader for
       * the clear. See AMDVLK. */
      if (tex->buffer.b.b.nr_storage_samples >= 4)
         return false;

      clear_size = tex->surface.dcc_size;
   } else {
      unsigned num_layers = util_num_layers(&tex->buffer.b.b, level);

      /* If this is 0, fast clear isn't possible. (can occur with MSAA) */
      if (!tex->surface.u.legacy.level[level].dcc_fast_clear_size)
         return false;

      /* Layered 4x and 8x MSAA DCC fast clears need to clear
       * dcc_fast_clear_size bytes for each layer. A compute shader
       * would be more efficient than separate per-layer clear operations.
       */
      if (tex->buffer.b.b.nr_storage_samples >= 4 && num_layers > 1)
         return false;

      dcc_offset += tex->surface.u.legacy.level[level].dcc_offset;
      clear_size = tex->surface.u.legacy.level[level].dcc_fast_clear_size * num_layers;
   }

   si_clear_buffer(sctx, dcc_buffer, dcc_offset, clear_size, &clear_value, 4, SI_COHERENCY_CB_META,
                   false);
   return true;
}

/* Set the same micro tile mode as the destination of the last MSAA resolve.
 * This allows hitting the MSAA resolve fast path, which requires that both
 * src and dst micro tile modes match.
 */
static void si_set_optimal_micro_tile_mode(struct si_screen *sscreen, struct si_texture *tex)
{
   if (sscreen->info.chip_class >= GFX10 || tex->buffer.b.is_shared ||
       tex->buffer.b.b.nr_samples <= 1 ||
       tex->surface.micro_tile_mode == tex->last_msaa_resolve_target_micro_mode)
      return;

   assert(sscreen->info.chip_class >= GFX9 ||
          tex->surface.u.legacy.level[0].mode == RADEON_SURF_MODE_2D);
   assert(tex->buffer.b.b.last_level == 0);

   if (sscreen->info.chip_class >= GFX9) {
      /* 4K or larger tiles only. 0 is linear. 1-3 are 256B tiles. */
      assert(tex->surface.u.gfx9.surf.swizzle_mode >= 4);

      /* If you do swizzle_mode % 4, you'll get:
       *   0 = Depth
       *   1 = Standard
       *   2 = Displayable
       *   3 = Rotated
       *
       * The depth-sample order isn't allowed:
       */
      assert(tex->surface.u.gfx9.surf.swizzle_mode % 4 != 0);

      switch (tex->last_msaa_resolve_target_micro_mode) {
      case RADEON_MICRO_MODE_DISPLAY:
         tex->surface.u.gfx9.surf.swizzle_mode &= ~0x3;
         tex->surface.u.gfx9.surf.swizzle_mode += 2; /* D */
         break;
      case RADEON_MICRO_MODE_STANDARD:
         tex->surface.u.gfx9.surf.swizzle_mode &= ~0x3;
         tex->surface.u.gfx9.surf.swizzle_mode += 1; /* S */
         break;
      case RADEON_MICRO_MODE_RENDER:
         tex->surface.u.gfx9.surf.swizzle_mode &= ~0x3;
         tex->surface.u.gfx9.surf.swizzle_mode += 3; /* R */
         break;
      default: /* depth */
         assert(!"unexpected micro mode");
         return;
      }
   } else if (sscreen->info.chip_class >= GFX7) {
      /* These magic numbers were copied from addrlib. It doesn't use
       * any definitions for them either. They are all 2D_TILED_THIN1
       * modes with different bpp and micro tile mode.
       */
      switch (tex->last_msaa_resolve_target_micro_mode) {
      case RADEON_MICRO_MODE_DISPLAY:
         tex->surface.u.legacy.tiling_index[0] = 10;
         break;
      case RADEON_MICRO_MODE_STANDARD:
         tex->surface.u.legacy.tiling_index[0] = 14;
         break;
      case RADEON_MICRO_MODE_RENDER:
         tex->surface.u.legacy.tiling_index[0] = 28;
         break;
      default: /* depth, thick */
         assert(!"unexpected micro mode");
         return;
      }
   } else { /* GFX6 */
      switch (tex->last_msaa_resolve_target_micro_mode) {
      case RADEON_MICRO_MODE_DISPLAY:
         switch (tex->surface.bpe) {
         case 1:
            tex->surface.u.legacy.tiling_index[0] = 10;
            break;
         case 2:
            tex->surface.u.legacy.tiling_index[0] = 11;
            break;
         default: /* 4, 8 */
            tex->surface.u.legacy.tiling_index[0] = 12;
            break;
         }
         break;
      case RADEON_MICRO_MODE_STANDARD:
         switch (tex->surface.bpe) {
         case 1:
            tex->surface.u.legacy.tiling_index[0] = 14;
            break;
         case 2:
            tex->surface.u.legacy.tiling_index[0] = 15;
            break;
         case 4:
            tex->surface.u.legacy.tiling_index[0] = 16;
            break;
         default: /* 8, 16 */
            tex->surface.u.legacy.tiling_index[0] = 17;
            break;
         }
         break;
      default: /* depth, thick */
         assert(!"unexpected micro mode");
         return;
      }
   }

   tex->surface.micro_tile_mode = tex->last_msaa_resolve_target_micro_mode;

   p_atomic_inc(&sscreen->dirty_tex_counter);
}

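/* Try to fast-clear the bound colorbuffers using DCC or CMASK. Buffers that
 * are successfully fast-cleared are removed from *buffers, so the caller only
 * performs a regular clear on the remaining ones.
 */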
static void si_do_fast_color_clear(struct si_context *sctx, unsigned *buffers,
                                   const union pipe_color_union *color)
{
   struct pipe_framebuffer_state *fb = &sctx->framebuffer.state;
   int i;

   /* This function is broken on big-endian, so just disable this path for now. */
#if UTIL_ARCH_BIG_ENDIAN
   return;
#endif

   if (sctx->render_cond)
      return;

   for (i = 0; i < fb->nr_cbufs; i++) {
      struct si_texture *tex;
      unsigned clear_bit = PIPE_CLEAR_COLOR0 << i;

      if (!fb->cbufs[i])
         continue;

      /* Skip this colorbuffer if it is not being cleared. */
      if (!(*buffers & clear_bit))
         continue;

      unsigned level = fb->cbufs[i]->u.tex.level;
      if (level > 0)
         continue;

      tex = (struct si_texture *)fb->cbufs[i]->texture;

      /* TODO: GFX9: Implement DCC fast clear for level 0 of
       * mipmapped textures. Mipmapped DCC has to clear a rectangular
       * area of DCC for level 0 (because the whole miptree is
       * organized in a 2D plane).
       */
      if (sctx->chip_class >= GFX9 && tex->buffer.b.b.last_level > 0)
         continue;

      /* The clear is only allowed if all layers are bound. */
      if (fb->cbufs[i]->u.tex.first_layer != 0 ||
          fb->cbufs[i]->u.tex.last_layer != util_max_layer(&tex->buffer.b.b, 0)) {
         continue;
      }

      /* Fast clear is only supported on tiled surfaces. */
      if (tex->surface.is_linear) {
         continue;
      }

      /* Shared textures can't use fast clear without an explicit flush,
       * because there is no way to communicate the clear color among
       * all clients.
       */
      if (tex->buffer.b.is_shared &&
          !(tex->buffer.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH))
         continue;

      if (sctx->chip_class <= GFX8 && tex->surface.u.legacy.level[0].mode == RADEON_SURF_MODE_1D &&
          !sctx->screen->info.htile_cmask_support_1d_tiling)
         continue;

      /* Use a slow clear for small surfaces where the cost of
       * the eliminate pass can be higher than the benefit of fast
       * clear. The closed driver does this, but the numbers may differ.
       *
       * This helps on both dGPUs and APUs, even small APUs like Mullins.
       */
      bool too_small = tex->buffer.b.b.nr_samples <= 1 &&
                       tex->buffer.b.b.width0 * tex->buffer.b.b.height0 <= 512 * 512;
      bool eliminate_needed = false;
      bool fmask_decompress_needed = false;

      /* Fast clear is the most appropriate place to enable DCC for
       * displayable surfaces.
       */
      if (sctx->family == CHIP_STONEY && !too_small) {
         vi_separate_dcc_try_enable(sctx, tex);

         /* RB+ isn't supported with a CMASK clear only on Stoney,
          * so all clears are considered to be hypothetically slow
          * clears, which is weighed when determining whether to
          * enable separate DCC.
          */
         if (tex->dcc_gather_statistics) /* only for Stoney */
            tex->num_slow_clears++;
      }

      /* Try to clear DCC first, otherwise try CMASK. */
      if (vi_dcc_enabled(tex, 0)) {
         uint32_t reset_value;

         if (sctx->screen->debug_flags & DBG(NO_DCC_CLEAR))
            continue;

         if (!vi_get_fast_clear_parameters(sctx->screen, tex->buffer.b.b.format,
                                           fb->cbufs[i]->format, color, &reset_value,
                                           &eliminate_needed))
            continue;

         if (eliminate_needed && too_small)
            continue;

         /* TODO: This DCC+CMASK clear doesn't work with MSAA. */
         if (tex->buffer.b.b.nr_samples >= 2 && tex->cmask_buffer && eliminate_needed)
            continue;

         if (!vi_dcc_clear_level(sctx, tex, 0, reset_value))
            continue;

         tex->separate_dcc_dirty = true;
         tex->displayable_dcc_dirty = true;

         /* DCC fast clear with MSAA should clear CMASK to 0xC. */
         if (tex->buffer.b.b.nr_samples >= 2 && tex->cmask_buffer) {
            uint32_t clear_value = 0xCCCCCCCC;
            si_clear_buffer(sctx, &tex->cmask_buffer->b.b, tex->surface.cmask_offset,
                            tex->surface.cmask_size, &clear_value, 4, SI_COHERENCY_CB_META, false);
            fmask_decompress_needed = true;
         }
      } else {
         if (too_small)
            continue;

         /* 128-bit formats are unsupported. */
         if (tex->surface.bpe > 8) {
            continue;
         }

         /* RB+ doesn't work with CMASK fast clear on Stoney. */
         if (sctx->family == CHIP_STONEY)
            continue;

         /* Disable fast clear if the texture is encrypted. */
         if (tex->buffer.flags & RADEON_FLAG_ENCRYPTED)
            continue;

         /* Ensure CMASK is enabled. */
         si_alloc_separate_cmask(sctx->screen, tex);
         if (!tex->cmask_buffer)
            continue;

         /* Do the fast clear. */
         uint32_t clear_value = 0;
         si_clear_buffer(sctx, &tex->cmask_buffer->b.b, tex->surface.cmask_offset,
                         tex->surface.cmask_size, &clear_value, 4, SI_COHERENCY_CB_META, false);
         eliminate_needed = true;
      }

      if ((eliminate_needed || fmask_decompress_needed) &&
          !(tex->dirty_level_mask & (1 << level))) {
         tex->dirty_level_mask |= 1 << level;
         p_atomic_inc(&sctx->screen->compressed_colortex_counter);
      }

      /* We can change the micro tile mode before a full clear. */
      si_set_optimal_micro_tile_mode(sctx->screen, tex);

      *buffers &= ~clear_bit;

      /* Chips with DCC constant encoding don't need to set the clear
       * color registers for DCC clear values 0 and 1.
       */
      if (sctx->screen->info.has_dcc_constant_encode && !eliminate_needed)
         continue;

      if (si_set_clear_color(tex, fb->cbufs[i]->format, color)) {
         sctx->framebuffer.dirty_cbufs |= 1 << i;
         si_mark_atom_dirty(sctx, &sctx->atoms.s.framebuffer);
      }
   }
}

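/* pipe_context::clear handler. Fast-clear what we can, hand the remaining
 * buffers to the blitter, and keep the DB_DEPTH_CLEAR/DB_STENCIL_CLEAR and
 * TC-compatible HTILE state up to date.
 */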
static void si_clear(struct pipe_context *ctx, unsigned buffers,
                     const struct pipe_scissor_state *scissor_state,
                     const union pipe_color_union *color, double depth, unsigned stencil)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct pipe_framebuffer_state *fb = &sctx->framebuffer.state;
   struct pipe_surface *zsbuf = fb->zsbuf;
   struct si_texture *zstex = zsbuf ? (struct si_texture *)zsbuf->texture : NULL;
   bool needs_db_flush = false;

   if (buffers & PIPE_CLEAR_COLOR) {
      si_do_fast_color_clear(sctx, &buffers, color);
      if (!buffers)
         return; /* all buffers have been fast cleared */

      /* These buffers cannot use fast clear; make sure to disable expansion. */
      for (unsigned i = 0; i < fb->nr_cbufs; i++) {
         struct si_texture *tex;

         /* If not clearing this buffer, skip it. */
         if (!(buffers & (PIPE_CLEAR_COLOR0 << i)) || !fb->cbufs[i])
            continue;

         tex = (struct si_texture *)fb->cbufs[i]->texture;
         if (tex->surface.fmask_size == 0)
            tex->dirty_level_mask &= ~(1 << fb->cbufs[i]->u.tex.level);
      }
   }

   if (zstex && zsbuf->u.tex.first_layer == 0 &&
       zsbuf->u.tex.last_layer == util_max_layer(&zstex->buffer.b.b, 0)) {
      /* See whether we should enable TC-compatible HTILE. */
      if (zstex->enable_tc_compatible_htile_next_clear &&
          !zstex->tc_compatible_htile &&
          si_htile_enabled(zstex, zsbuf->u.tex.level, PIPE_MASK_ZS) &&
          /* If both depth and stencil are present, they must be cleared together. */
          ((buffers & PIPE_CLEAR_DEPTHSTENCIL) == PIPE_CLEAR_DEPTHSTENCIL ||
           (buffers & PIPE_CLEAR_DEPTH && (!zstex->surface.has_stencil ||
                                           zstex->htile_stencil_disabled)))) {
         /* Enable TC-compatible HTILE. */
         zstex->enable_tc_compatible_htile_next_clear = false;
         zstex->tc_compatible_htile = true;

         /* Update the framebuffer state to reflect the change. */
         sctx->framebuffer.DB_has_shader_readable_metadata = true;
         sctx->framebuffer.dirty_zsbuf = true;
         si_mark_atom_dirty(sctx, &sctx->atoms.s.framebuffer);

         /* Update all sampler views and shader images in all contexts. */
         p_atomic_inc(&sctx->screen->dirty_tex_counter);

         /* Re-initialize HTILE, so that it doesn't contain values incompatible
          * with the new TC-compatible HTILE setting.
          *
          * 0xfffff30f = uncompressed Z + S
          * 0xfffc000f = uncompressed Z only
          *
          * GFX8 always uses the Z+S HTILE format for TC-compatible HTILE even
          * when stencil is not present.
          */
         uint32_t clear_value = (zstex->surface.has_stencil &&
                                 !zstex->htile_stencil_disabled) ||
                                sctx->chip_class == GFX8 ? 0xfffff30f : 0xfffc000f;
         si_clear_buffer(sctx, &zstex->buffer.b.b, zstex->surface.htile_offset,
                         zstex->surface.htile_size, &clear_value, 4,
                         SI_COHERENCY_DB_META, false);
      }

      /* TC-compatible HTILE only supports depth clears to 0 or 1. */
      if (buffers & PIPE_CLEAR_DEPTH && si_htile_enabled(zstex, zsbuf->u.tex.level, PIPE_MASK_Z) &&
          (!zstex->tc_compatible_htile || depth == 0 || depth == 1)) {
         /* Need to disable EXPCLEAR temporarily if clearing
          * to a new value. */
         if (!zstex->depth_cleared || zstex->depth_clear_value != depth) {
            sctx->db_depth_disable_expclear = true;
         }

         if (zstex->depth_clear_value != (float)depth) {
            if ((zstex->depth_clear_value != 0) != (depth != 0)) {
               /* ZRANGE_PRECISION register of a bound surface will change, so
                * we must flush the DB caches. */
               needs_db_flush = true;
            }
            /* Update DB_DEPTH_CLEAR. */
            zstex->depth_clear_value = depth;
            sctx->framebuffer.dirty_zsbuf = true;
            si_mark_atom_dirty(sctx, &sctx->atoms.s.framebuffer);
         }
         sctx->db_depth_clear = true;
         si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
      }

      /* TC-compatible HTILE only supports stencil clears to 0. */
      if (buffers & PIPE_CLEAR_STENCIL &&
          si_htile_enabled(zstex, zsbuf->u.tex.level, PIPE_MASK_S) &&
          (!zstex->tc_compatible_htile || stencil == 0)) {
         stencil &= 0xff;

         /* Need to disable EXPCLEAR temporarily if clearing
          * to a new value. */
         if (!zstex->stencil_cleared || zstex->stencil_clear_value != stencil) {
            sctx->db_stencil_disable_expclear = true;
         }

         if (zstex->stencil_clear_value != (uint8_t)stencil) {
            /* Update DB_STENCIL_CLEAR. */
            zstex->stencil_clear_value = stencil;
            sctx->framebuffer.dirty_zsbuf = true;
            si_mark_atom_dirty(sctx, &sctx->atoms.s.framebuffer);
         }
         sctx->db_stencil_clear = true;
         si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
      }

      if (needs_db_flush)
         sctx->flags |= SI_CONTEXT_FLUSH_AND_INV_DB;
   }

   si_blitter_begin(sctx, SI_CLEAR);
   util_blitter_clear(sctx->blitter, fb->width, fb->height, util_framebuffer_get_num_layers(fb),
                      buffers, color, depth, stencil, sctx->framebuffer.nr_samples > 1);
   si_blitter_end(sctx);

   if (sctx->db_depth_clear) {
      sctx->db_depth_clear = false;
      sctx->db_depth_disable_expclear = false;
      zstex->depth_cleared = true;
      si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
   }

   if (sctx->db_stencil_clear) {
      sctx->db_stencil_clear = false;
      sctx->db_stencil_disable_expclear = false;
      zstex->stencil_cleared = true;
      si_mark_atom_dirty(sctx, &sctx->atoms.s.db_render_state);
   }
}

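/* pipe_context::clear_render_target handler. Single-sample surfaces without
 * DCC are cleared with a compute shader; everything else goes through the
 * blitter.
 */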
static void si_clear_render_target(struct pipe_context *ctx, struct pipe_surface *dst,
                                   const union pipe_color_union *color, unsigned dstx,
                                   unsigned dsty, unsigned width, unsigned height,
                                   bool render_condition_enabled)
{
   struct si_context *sctx = (struct si_context *)ctx;
   struct si_texture *sdst = (struct si_texture *)dst->texture;

   if (dst->texture->nr_samples <= 1 && !vi_dcc_enabled(sdst, dst->u.tex.level)) {
      si_compute_clear_render_target(ctx, dst, color, dstx, dsty, width, height,
                                     render_condition_enabled);
      return;
   }

   si_blitter_begin(sctx,
                    SI_CLEAR_SURFACE | (render_condition_enabled ? 0 : SI_DISABLE_RENDER_COND));
   util_blitter_clear_render_target(sctx->blitter, dst, color, dstx, dsty, width, height);
   si_blitter_end(sctx);
}

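/* pipe_context::clear_depth_stencil handler, implemented with the blitter. */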
static void si_clear_depth_stencil(struct pipe_context *ctx, struct pipe_surface *dst,
                                   unsigned clear_flags, double depth, unsigned stencil,
                                   unsigned dstx, unsigned dsty, unsigned width, unsigned height,
                                   bool render_condition_enabled)
{
   struct si_context *sctx = (struct si_context *)ctx;

   si_blitter_begin(sctx,
                    SI_CLEAR_SURFACE | (render_condition_enabled ? 0 : SI_DISABLE_RENDER_COND));
   util_blitter_clear_depth_stencil(sctx->blitter, dst, clear_flags, depth, stencil, dstx, dsty,
                                    width, height);
   si_blitter_end(sctx);
}

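/* pipe_context::clear_texture handler. Create a temporary surface for the
 * requested level/layers, unpack the clear value, and forward it to the
 * depth-stencil or render-target clear path.
 */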
static void si_clear_texture(struct pipe_context *pipe, struct pipe_resource *tex, unsigned level,
                             const struct pipe_box *box, const void *data)
{
   struct pipe_screen *screen = pipe->screen;
   struct si_texture *stex = (struct si_texture *)tex;
   struct pipe_surface tmpl = {{0}};
   struct pipe_surface *sf;

   tmpl.format = tex->format;
   tmpl.u.tex.first_layer = box->z;
   tmpl.u.tex.last_layer = box->z + box->depth - 1;
   tmpl.u.tex.level = level;
   sf = pipe->create_surface(pipe, tex, &tmpl);
   if (!sf)
      return;

   if (stex->is_depth) {
      unsigned clear;
      float depth;
      uint8_t stencil = 0;

      /* Depth is always present. */
      clear = PIPE_CLEAR_DEPTH;
      util_format_unpack_z_float(tex->format, &depth, data, 1);

      if (stex->surface.has_stencil) {
         clear |= PIPE_CLEAR_STENCIL;
         util_format_unpack_s_8uint(tex->format, &stencil, data, 1);
      }

      si_clear_depth_stencil(pipe, sf, clear, depth, stencil, box->x, box->y, box->width,
                             box->height, false);
   } else {
      union pipe_color_union color;

      util_format_unpack_rgba(tex->format, color.ui, data, 1);

      if (screen->is_format_supported(screen, tex->format, tex->target, 0, 0,
                                      PIPE_BIND_RENDER_TARGET)) {
         si_clear_render_target(pipe, sf, &color, box->x, box->y, box->width, box->height, false);
      } else {
         /* Software fallback - just for R9G9B9E5_FLOAT */
         util_clear_render_target(pipe, sf, &color, box->x, box->y, box->width, box->height);
      }
   }
   pipe_surface_reference(&sf, NULL);
}

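/* Install the clear-related pipe_context entry points. The draw-based clears
 * are only exposed when the context has graphics support.
 */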
void si_init_clear_functions(struct si_context *sctx)
{
   sctx->b.clear_render_target = si_clear_render_target;
   sctx->b.clear_texture = si_clear_texture;

   if (sctx->has_graphics) {
      sctx->b.clear = si_clear;
      sctx->b.clear_depth_stencil = si_clear_depth_stencil;
   }
}