/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_resolve.c
 *
 * This file handles resolve tracking for main and auxiliary surfaces.
 *
 * It also handles our cache tracking. We have sets for the render cache,
 * depth cache, and so on. If a BO is in a cache's set, then it may have
 * data in that cache. The helpers take care of emitting flushes for
 * render-to-texture, format reinterpretation issues, and other situations.
 */

#include "util/hash_table.h"
#include "util/set.h"
#include "iris_context.h"
#include "compiler/nir/nir.h"

/**
 * Disable auxiliary buffers if a renderbuffer is also bound as a texture
 * or shader image. This causes a self-dependency, where both rendering
 * and sampling may concurrently read or write the CCS buffer, causing
 * incorrect pixels.
 */
static bool
disable_rb_aux_buffer(struct iris_context *ice,
                      bool *draw_aux_buffer_disabled,
                      struct iris_resource *tex_res,
                      unsigned min_level, unsigned num_levels,
                      const char *usage)
{
   struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
   bool found = false;

   /* We only need to worry about color compression and fast clears. */
   if (tex_res->aux.usage != ISL_AUX_USAGE_CCS_D &&
       tex_res->aux.usage != ISL_AUX_USAGE_CCS_E &&
       tex_res->aux.usage != ISL_AUX_USAGE_GFX12_CCS_E)
      return false;

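   /* Check each bound color buffer: if it shares a BO with the sampled
    * texture and its miplevel falls within the sampled level range,
    * rendering and sampling would alias, so flag its aux as disabled
    * for this draw.
    */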
   for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
      struct iris_surface *surf = (void *) cso_fb->cbufs[i];
      if (!surf)
         continue;

      struct iris_resource *rb_res = (void *) surf->base.texture;

      if (rb_res->bo == tex_res->bo &&
          surf->base.u.tex.level >= min_level &&
          surf->base.u.tex.level < min_level + num_levels) {
         found = draw_aux_buffer_disabled[i] = true;
      }
   }

   if (found) {
      perf_debug(&ice->dbg,
                 "Disabling CCS because a renderbuffer is also bound %s.\n",
                 usage);
   }

   return found;
}

static void
resolve_sampler_views(struct iris_context *ice,
                      struct iris_batch *batch,
                      struct iris_shader_state *shs,
                      const struct shader_info *info,
                      bool *draw_aux_buffer_disabled,
                      bool consider_framebuffer)
{
   uint32_t views = info ? (shs->bound_sampler_views & info->textures_used[0]) : 0;

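   /* Walk the set bits with u_bit_scan: bit i being set means sampler
    * view i is both bound and actually used by the shader.
    */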
   while (views) {
      const int i = u_bit_scan(&views);
      struct iris_sampler_view *isv = shs->textures[i];

      if (isv->res->base.b.target != PIPE_BUFFER) {
         if (consider_framebuffer) {
            disable_rb_aux_buffer(ice, draw_aux_buffer_disabled, isv->res,
                                  isv->view.base_level, isv->view.levels,
                                  "for sampling");
         }

         iris_resource_prepare_texture(ice, isv->res, isv->view.format,
                                       isv->view.base_level, isv->view.levels,
                                       isv->view.base_array_layer,
                                       isv->view.array_len);
      }

      iris_emit_buffer_barrier_for(batch, isv->res->bo,
                                   IRIS_DOMAIN_SAMPLER_READ);
   }
}

static void
resolve_image_views(struct iris_context *ice,
                    struct iris_batch *batch,
                    struct iris_shader_state *shs,
                    const struct shader_info *info,
                    bool *draw_aux_buffer_disabled,
                    bool consider_framebuffer)
{
   uint32_t views = info ? (shs->bound_image_views & info->images_used[0]) : 0;

   while (views) {
      const int i = u_bit_scan(&views);
      struct pipe_image_view *pview = &shs->image[i].base;
      struct iris_resource *res = (void *) pview->resource;

      if (res->base.b.target != PIPE_BUFFER) {
         if (consider_framebuffer) {
            disable_rb_aux_buffer(ice, draw_aux_buffer_disabled,
                                  res, pview->u.tex.level, 1,
                                  "as a shader image");
         }

         unsigned num_layers =
            pview->u.tex.last_layer - pview->u.tex.first_layer + 1;

         enum isl_aux_usage aux_usage =
            iris_image_view_aux_usage(ice, pview, info);

         iris_resource_prepare_access(ice, res,
                                      pview->u.tex.level, 1,
                                      pview->u.tex.first_layer, num_layers,
                                      aux_usage, false);
      }

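      /* Shader images may be written, so the BO is flagged with the
       * data-write domain; this appears intentionally conservative even
       * for read-only image bindings.
       */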
      iris_emit_buffer_barrier_for(batch, res->bo, IRIS_DOMAIN_DATA_WRITE);
   }
}

/**
 * \brief Resolve buffers before drawing.
 *
 * Resolve the depth buffer's HiZ buffer, resolve the depth buffer of each
 * enabled depth texture, and flush the render cache for any dirty textures.
 */
void
iris_predraw_resolve_inputs(struct iris_context *ice,
                            struct iris_batch *batch,
                            bool *draw_aux_buffer_disabled,
                            gl_shader_stage stage,
                            bool consider_framebuffer)
{
   struct iris_shader_state *shs = &ice->state.shaders[stage];
   const struct shader_info *info = iris_get_shader_info(ice, stage);

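   /* Skip the walk entirely unless this stage's bindings (or, when the
    * framebuffer matters for self-dependencies, the FS bindings) have
    * changed since the last draw.
    */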
   uint64_t stage_dirty = (IRIS_STAGE_DIRTY_BINDINGS_VS << stage) |
      (consider_framebuffer ? IRIS_STAGE_DIRTY_BINDINGS_FS : 0);

   if (ice->state.stage_dirty & stage_dirty) {
      resolve_sampler_views(ice, batch, shs, info, draw_aux_buffer_disabled,
                            consider_framebuffer);
      resolve_image_views(ice, batch, shs, info, draw_aux_buffer_disabled,
                          consider_framebuffer);
   }
}

void
iris_predraw_resolve_framebuffer(struct iris_context *ice,
                                 struct iris_batch *batch,
                                 bool *draw_aux_buffer_disabled)
{
   struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
   struct iris_screen *screen = (void *) ice->ctx.screen;
   struct intel_device_info *devinfo = &screen->devinfo;
   struct iris_uncompiled_shader *ish =
      ice->shaders.uncompiled[MESA_SHADER_FRAGMENT];
   const nir_shader *nir = ish->nir;

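   /* The depth/stencil buffer only needs preparing when its binding has
    * changed (IRIS_DIRTY_DEPTH_BUFFER).
    */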
   if (ice->state.dirty & IRIS_DIRTY_DEPTH_BUFFER) {
      struct pipe_surface *zs_surf = cso_fb->zsbuf;

      if (zs_surf) {
         struct iris_resource *z_res, *s_res;
         iris_get_depth_stencil_resources(zs_surf->texture, &z_res, &s_res);
         unsigned num_layers =
            zs_surf->u.tex.last_layer - zs_surf->u.tex.first_layer + 1;

         if (z_res) {
            iris_resource_prepare_render(ice, z_res, zs_surf->u.tex.level,
                                         zs_surf->u.tex.first_layer,
                                         num_layers, ice->state.hiz_usage);
            iris_emit_buffer_barrier_for(batch, z_res->bo,
                                         IRIS_DOMAIN_DEPTH_WRITE);
         }

         if (s_res) {
            iris_emit_buffer_barrier_for(batch, s_res->bo,
                                         IRIS_DOMAIN_DEPTH_WRITE);
         }
      }
   }

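   /* outputs_read means the fragment shader reads its render targets
    * (framebuffer fetch). On Gfx8 this presumably goes through the
    * sampler rather than the render cache, so each bound color buffer
    * must first be prepared for texturing.
    */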
   if (devinfo->ver == 8 && nir->info.outputs_read != 0) {
      for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
         if (cso_fb->cbufs[i]) {
            struct iris_surface *surf = (void *) cso_fb->cbufs[i];
            struct iris_resource *res = (void *) cso_fb->cbufs[i]->texture;

            iris_resource_prepare_texture(ice, res, surf->view.format,
                                          surf->view.base_level, 1,
                                          surf->view.base_array_layer,
                                          surf->view.array_len);
         }
      }
   }

   if (ice->state.stage_dirty & IRIS_STAGE_DIRTY_BINDINGS_FS) {
      for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
         struct iris_surface *surf = (void *) cso_fb->cbufs[i];
         if (!surf)
            continue;

         struct iris_resource *res = (void *) surf->base.texture;

         enum isl_aux_usage aux_usage =
            iris_resource_render_aux_usage(ice, res, surf->view.base_level,
                                           surf->view.format,
                                           draw_aux_buffer_disabled[i]);

         if (ice->state.draw_aux_usage[i] != aux_usage) {
            ice->state.draw_aux_usage[i] = aux_usage;
            /* XXX: Need to track which bindings to make dirty */
            ice->state.dirty |= IRIS_DIRTY_RENDER_BUFFER;
            ice->state.stage_dirty |= IRIS_ALL_STAGE_DIRTY_BINDINGS;
         }

         iris_resource_prepare_render(ice, res, surf->view.base_level,
                                      surf->view.base_array_layer,
                                      surf->view.array_len,
                                      aux_usage);

         iris_cache_flush_for_render(batch, res->bo, aux_usage);
      }
   }
}

/**
 * \brief Call this after drawing to mark which buffers need resolving.
 *
 * If the depth buffer was written to and if it has an accompanying HiZ
 * buffer, then mark that it needs a depth resolve.
 *
 * If the color buffer is a multisample window system buffer, then
 * mark that it needs a downsample.
 *
 * Also mark any render targets which will be textured as needing a render
 * cache flush.
 */
void
iris_postdraw_update_resolve_tracking(struct iris_context *ice,
                                      struct iris_batch *batch)
{
   struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;

   // XXX: front buffer drawing?

   bool may_have_resolved_depth =
      ice->state.dirty & (IRIS_DIRTY_DEPTH_BUFFER |
                          IRIS_DIRTY_WM_DEPTH_STENCIL);

   struct pipe_surface *zs_surf = cso_fb->zsbuf;
   if (zs_surf) {
      struct iris_resource *z_res, *s_res;
      iris_get_depth_stencil_resources(zs_surf->texture, &z_res, &s_res);
      unsigned num_layers =
         zs_surf->u.tex.last_layer - zs_surf->u.tex.first_layer + 1;

      if (z_res) {
         if (may_have_resolved_depth && ice->state.depth_writes_enabled) {
            iris_resource_finish_render(ice, z_res, zs_surf->u.tex.level,
                                        zs_surf->u.tex.first_layer,
                                        num_layers, ice->state.hiz_usage);
         }
      }

      if (s_res) {
         if (may_have_resolved_depth && ice->state.stencil_writes_enabled) {
            iris_resource_finish_write(ice, s_res, zs_surf->u.tex.level,
                                       zs_surf->u.tex.first_layer, num_layers,
                                       s_res->aux.usage);
         }
      }
   }

   bool may_have_resolved_color =
      ice->state.stage_dirty & IRIS_STAGE_DIRTY_BINDINGS_FS;

   for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
      struct iris_surface *surf = (void *) cso_fb->cbufs[i];
      if (!surf)
         continue;

      struct iris_resource *res = (void *) surf->base.texture;
      enum isl_aux_usage aux_usage = ice->state.draw_aux_usage[i];

      if (may_have_resolved_color) {
         union pipe_surface_desc *desc = &surf->base.u;
         unsigned num_layers =
            desc->tex.last_layer - desc->tex.first_layer + 1;
         iris_resource_finish_render(ice, res, desc->tex.level,
                                     desc->tex.first_layer, num_layers,
                                     aux_usage);
      }
   }
}

void
iris_cache_flush_for_render(struct iris_batch *batch,
                            struct iris_bo *bo,
                            enum isl_aux_usage aux_usage)
{
   iris_emit_buffer_barrier_for(batch, bo, IRIS_DOMAIN_RENDER_WRITE);

   /* Check to see if this bo has been used by a previous rendering operation
    * but with a different aux usage. If it has, flush the render cache so we
    * ensure that it's only in there with one aux usage at a time.
    *
    * Even though it's not obvious, this can easily happen in practice.
    * Suppose a client is blending on a surface with sRGB encode enabled on
    * gfx9. This implies that you get AUX_USAGE_CCS_D at best. If the client
    * then disables sRGB decode and continues blending we will flip on
    * AUX_USAGE_CCS_E without doing any sort of resolve in-between (this is
    * perfectly valid since CCS_E is a subset of CCS_D). However, this means
    * that we have fragments in-flight which are rendering with UNORM+CCS_E
    * and other fragments in-flight with SRGB+CCS_D on the same surface at the
    * same time and the pixel scoreboard and color blender are trying to sort
    * it all out. This ends badly (i.e. GPU hangs).
    *
    * There are comments in various docs which indicate that the render cache
    * isn't 100% resilient to format changes. However, to date, we have never
    * observed GPU hangs or even corruption to be associated with switching the
    * format, only the aux usage. So we let that slide for now.
    */
   void *v_aux_usage = (void *) (uintptr_t) aux_usage;
   struct hash_entry *entry =
      _mesa_hash_table_search_pre_hashed(batch->cache.render, bo->hash, bo);
   if (!entry) {
      _mesa_hash_table_insert_pre_hashed(batch->cache.render, bo->hash, bo,
                                         v_aux_usage);
   } else if (entry->data != v_aux_usage) {
      iris_emit_pipe_control_flush(batch,
                                   "cache tracker: aux usage mismatch",
                                   PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                   PIPE_CONTROL_TILE_CACHE_FLUSH |
                                   PIPE_CONTROL_CS_STALL);
      entry->data = v_aux_usage;
   }
}

static void
flush_ubos(struct iris_batch *batch,
           struct iris_shader_state *shs)
{
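   /* Only constant buffers that are both still bound and have been marked
    * dirty since the last flush need a new barrier.
    */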
   uint32_t cbufs = shs->dirty_cbufs & shs->bound_cbufs;

   while (cbufs) {
      const int i = u_bit_scan(&cbufs);
      struct pipe_shader_buffer *cbuf = &shs->constbuf[i];
      struct iris_resource *res = (void *) cbuf->buffer;
      iris_emit_buffer_barrier_for(batch, res->bo,
                                   IRIS_DOMAIN_PULL_CONSTANT_READ);
   }

   shs->dirty_cbufs = 0;
}

static void
flush_ssbos(struct iris_batch *batch,
            struct iris_shader_state *shs)
{
   uint32_t ssbos = shs->bound_ssbos;

   while (ssbos) {
      const int i = u_bit_scan(&ssbos);
      struct pipe_shader_buffer *ssbo = &shs->ssbo[i];
      struct iris_resource *res = (void *) ssbo->buffer;
      iris_emit_buffer_barrier_for(batch, res->bo, IRIS_DOMAIN_DATA_WRITE);
   }
}

void
iris_predraw_flush_buffers(struct iris_context *ice,
                           struct iris_batch *batch,
                           gl_shader_stage stage)
{
   struct iris_shader_state *shs = &ice->state.shaders[stage];

   if (ice->state.stage_dirty & (IRIS_STAGE_DIRTY_CONSTANTS_VS << stage))
      flush_ubos(batch, shs);

   if (ice->state.stage_dirty & (IRIS_STAGE_DIRTY_BINDINGS_VS << stage))
      flush_ssbos(batch, shs);

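   /* Flag stream output targets as written so that any later reads of
    * those buffers are ordered behind the streamout writes.
    */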
   if (ice->state.streamout_active &&
       (ice->state.dirty & IRIS_DIRTY_SO_BUFFERS)) {
      for (int i = 0; i < 4; i++) {
         struct iris_stream_output_target *tgt =
            (void *) ice->state.so_target[i];
         if (tgt) {
            struct iris_bo *bo = iris_resource_bo(tgt->base.buffer);
            iris_emit_buffer_barrier_for(batch, bo, IRIS_DOMAIN_OTHER_WRITE);
         }
      }
   }
}

static void
iris_resolve_color(struct iris_context *ice,
                   struct iris_batch *batch,
                   struct iris_resource *res,
                   unsigned level, unsigned layer,
                   enum isl_aux_op resolve_op)
{
   //DBG("%s to mt %p level %u layer %u\n", __FUNCTION__, mt, level, layer);

   struct blorp_surf surf;
   iris_blorp_surf_for_resource(&batch->screen->isl_dev, &surf,
                                &res->base.b, res->aux.usage, level, true);

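   /* Flush now if the batch lacks space; 1500 bytes is presumably a
    * conservative upper bound on what this resolve will emit.
    */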
   iris_batch_maybe_flush(batch, 1500);

   /* Ivybridge PRM Vol 2, Part 1, "11.7 MCS Buffer for Render Target(s)":
    *
    *    "Any transition from any value in {Clear, Render, Resolve} to a
    *     different value in {Clear, Render, Resolve} requires end of pipe
    *     synchronization."
    *
    * In other words, fast clear ops are not properly synchronized with
    * other drawing. We need to use a PIPE_CONTROL to ensure that the
    * contents of the previous draw hit the render target before we resolve
    * and again afterwards to ensure that the resolve is complete before we
    * do any more regular drawing.
    */
   iris_emit_end_of_pipe_sync(batch, "color resolve: pre-flush",
                              PIPE_CONTROL_RENDER_TARGET_FLUSH);

   /* Wa_1508744258
    *
    *    Disable RHWO by setting 0x7010[14] by default except during resolve
    *    pass.
    *
    * We implement global disabling of the RHWO optimization during
    * iris_init_render_context. We toggle it around the blorp resolve call.
    */
   assert(resolve_op == ISL_AUX_OP_FULL_RESOLVE ||
          resolve_op == ISL_AUX_OP_PARTIAL_RESOLVE);
   batch->screen->vtbl.disable_rhwo_optimization(batch, false);

   iris_batch_sync_region_start(batch);
   struct blorp_batch blorp_batch;
   blorp_batch_init(&ice->blorp, &blorp_batch, batch, 0);
   blorp_ccs_resolve(&blorp_batch, &surf, level, layer, 1, res->surf.format,
                     resolve_op);
   blorp_batch_finish(&blorp_batch);

   /* See comment above */
   iris_emit_end_of_pipe_sync(batch, "color resolve: post-flush",
                              PIPE_CONTROL_RENDER_TARGET_FLUSH);

   batch->screen->vtbl.disable_rhwo_optimization(batch, true);

   iris_batch_sync_region_end(batch);
}

static void
iris_mcs_partial_resolve(struct iris_context *ice,
                         struct iris_batch *batch,
                         struct iris_resource *res,
                         uint32_t start_layer,
                         uint32_t num_layers)
{
   //DBG("%s to mt %p layers %u-%u\n", __FUNCTION__, mt,
   //    start_layer, start_layer + num_layers - 1);

   assert(isl_aux_usage_has_mcs(res->aux.usage));

   iris_batch_maybe_flush(batch, 1500);

   struct blorp_surf surf;
   iris_blorp_surf_for_resource(&batch->screen->isl_dev, &surf,
                                &res->base.b, res->aux.usage, 0, true);
   iris_emit_buffer_barrier_for(batch, res->bo, IRIS_DOMAIN_RENDER_WRITE);

   struct blorp_batch blorp_batch;
   iris_batch_sync_region_start(batch);
   blorp_batch_init(&ice->blorp, &blorp_batch, batch, 0);
   blorp_mcs_partial_resolve(&blorp_batch, &surf, res->surf.format,
                             start_layer, num_layers);
   blorp_batch_finish(&blorp_batch);
   iris_batch_sync_region_end(batch);
}

bool
iris_sample_with_depth_aux(const struct intel_device_info *devinfo,
                           const struct iris_resource *res)
{
   switch (res->aux.usage) {
   case ISL_AUX_USAGE_HIZ:
      if (devinfo->has_sample_with_hiz)
         break;
      return false;
   case ISL_AUX_USAGE_HIZ_CCS:
      return false;
   case ISL_AUX_USAGE_HIZ_CCS_WT:
      break;
   default:
      return false;
   }

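   /* Sampling with HiZ only works if every miplevel actually has HiZ;
    * bail if any level lacks it.
    */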
   for (unsigned level = 0; level < res->surf.levels; ++level) {
      if (!iris_resource_level_has_hiz(res, level))
         return false;
   }

   /* From the BDW PRM (Volume 2d: Command Reference: Structures
    * RENDER_SURFACE_STATE.AuxiliarySurfaceMode):
    *
    *    "If this field is set to AUX_HIZ, Number of Multisamples must be
    *     MULTISAMPLECOUNT_1, and Surface Type cannot be SURFTYPE_3D."
    *
    * There is no such blurb for 1D textures, but there is sufficient
    * evidence that this is broken on SKL+.
    */
   return res->surf.samples == 1 && res->surf.dim == ISL_SURF_DIM_2D;
}

/**
 * Perform a HiZ or depth resolve operation.
 *
 * For an overview of HiZ ops, see the following sections of the Sandy Bridge
 * PRM, Volume 1, Part 2:
 *   - 7.5.3.1 Depth Buffer Clear
 *   - 7.5.3.2 Depth Buffer Resolve
 *   - 7.5.3.3 Hierarchical Depth Buffer Resolve
 */
void
iris_hiz_exec(struct iris_context *ice,
              struct iris_batch *batch,
              struct iris_resource *res,
              unsigned int level, unsigned int start_layer,
              unsigned int num_layers, enum isl_aux_op op,
              bool update_clear_depth)
{
   assert(iris_resource_level_has_hiz(res, level));
   assert(op != ISL_AUX_OP_NONE);
   UNUSED const char *name = NULL;
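   /* 'name' is consumed only by the commented-out DBG call below, hence
    * the UNUSED annotation.
    */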

   iris_batch_maybe_flush(batch, 1500);

   switch (op) {
   case ISL_AUX_OP_FULL_RESOLVE:
      name = "depth resolve";
      break;
   case ISL_AUX_OP_AMBIGUATE:
      name = "hiz ambiguate";
      break;
   case ISL_AUX_OP_FAST_CLEAR:
      name = "depth clear";
      break;
   case ISL_AUX_OP_PARTIAL_RESOLVE:
   case ISL_AUX_OP_NONE:
      unreachable("Invalid HiZ op");
   }

   //DBG("%s %s to mt %p level %d layers %d-%d\n",
   //    __func__, name, mt, level, start_layer, start_layer + num_layers - 1);

   /* The following stalls and flushes are only documented to be required
    * for HiZ clear operations. However, they also seem to be required for
    * resolve operations.
    *
    * From the Ivybridge PRM, volume 2, "Depth Buffer Clear":
    *
    *    "If other rendering operations have preceded this clear, a
    *     PIPE_CONTROL with depth cache flush enabled, Depth Stall bit
    *     enabled must be issued before the rectangle primitive used for
    *     the depth buffer clear operation."
    *
    * Same applies for Gfx8 and Gfx9.
    */
   iris_emit_pipe_control_flush(batch,
                                "hiz op: pre-flush",
                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                PIPE_CONTROL_DEPTH_STALL |
                                PIPE_CONTROL_CS_STALL);

   iris_batch_sync_region_start(batch);

   struct blorp_surf surf;
   iris_blorp_surf_for_resource(&batch->screen->isl_dev, &surf,
                                &res->base.b, res->aux.usage, level, true);

   struct blorp_batch blorp_batch;
   enum blorp_batch_flags flags = 0;
   flags |= update_clear_depth ? 0 : BLORP_BATCH_NO_UPDATE_CLEAR_COLOR;
   blorp_batch_init(&ice->blorp, &blorp_batch, batch, flags);
   blorp_hiz_op(&blorp_batch, &surf, level, start_layer, num_layers, op);
   blorp_batch_finish(&blorp_batch);

   /* The following stalls and flushes are only documented to be required
    * for HiZ clear operations. However, they also seem to be required for
    * resolve operations.
    *
    * From the Broadwell PRM, volume 7, "Depth Buffer Clear":
    *
    *    "Depth buffer clear pass using any of the methods (WM_STATE,
    *     3DSTATE_WM or 3DSTATE_WM_HZ_OP) must be followed by a
    *     PIPE_CONTROL command with DEPTH_STALL bit and Depth FLUSH bits
    *     "set" before starting to render. DepthStall and DepthFlush are
    *     not needed between consecutive depth clear passes nor is it
    *     required if the depth clear pass was done with
    *     'full_surf_clear' bit set in the 3DSTATE_WM_HZ_OP."
    *
    * TODO: As the spec says, this could be conditional.
    */
   iris_emit_pipe_control_flush(batch,
                                "hiz op: post flush",
                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                PIPE_CONTROL_DEPTH_STALL);

   iris_batch_sync_region_end(batch);
}

/**
 * Does the given miplevel of the resource have HiZ enabled?
 */
bool
iris_resource_level_has_hiz(const struct iris_resource *res, uint32_t level)
{
   iris_resource_check_level_layer(res, level, 0);

   if (!isl_aux_usage_has_hiz(res->aux.usage))
      return false;

   /* Disable HiZ for LOD > 0 unless the width/height are 8x4 aligned.
    * For LOD == 0, we can grow the dimensions to make it work.
    */
   if (level > 0) {
      if (u_minify(res->base.b.width0, level) & 7)
         return false;

      if (u_minify(res->base.b.height0, level) & 3)
         return false;
   }

   return true;
}

/** \brief Assert that the level and layer are valid for the resource. */
void
iris_resource_check_level_layer(UNUSED const struct iris_resource *res,
                                UNUSED uint32_t level, UNUSED uint32_t layer)
{
   assert(level < res->surf.levels);
   assert(layer < util_num_layers(&res->base.b, level));
}

static inline uint32_t
miptree_level_range_length(const struct iris_resource *res,
                           uint32_t start_level, uint32_t num_levels)
{
   assert(start_level < res->surf.levels);

   /* Resolve the "all remaining levels" sentinel to an actual count. */
   if (num_levels == INTEL_REMAINING_LEVELS)
      num_levels = res->surf.levels - start_level;

   /* Check for overflow */
   assert(start_level + num_levels >= start_level);
   assert(start_level + num_levels <= res->surf.levels);

   return num_levels;
}

static inline uint32_t
miptree_layer_range_length(const struct iris_resource *res, uint32_t level,
                           uint32_t start_layer, uint32_t num_layers)
{
   assert(level <= res->base.b.last_level);

   const uint32_t total_num_layers = iris_get_num_logical_layers(res, level);
   assert(start_layer < total_num_layers);
   if (num_layers == INTEL_REMAINING_LAYERS)
      num_layers = total_num_layers - start_layer;
   /* Check for overflow */
   assert(start_layer + num_layers >= start_layer);
   assert(start_layer + num_layers <= total_num_layers);

   return num_layers;
}

bool
iris_has_invalid_primary(const struct iris_resource *res,
                         unsigned start_level, unsigned num_levels,
                         unsigned start_layer, unsigned num_layers)
{
   if (res->aux.usage == ISL_AUX_USAGE_NONE)
      return false;

   /* Clamp the level range to fit the resource */
   num_levels = miptree_level_range_length(res, start_level, num_levels);

   for (uint32_t l = 0; l < num_levels; l++) {
      const uint32_t level = start_level + l;
      const uint32_t level_layers =
         miptree_layer_range_length(res, level, start_layer, num_layers);
      for (unsigned a = 0; a < level_layers; a++) {
         enum isl_aux_state aux_state =
            iris_resource_get_aux_state(res, level, start_layer + a);
         if (!isl_aux_state_has_valid_primary(aux_state))
            return true;
      }
   }

   return false;
}

void
iris_resource_prepare_access(struct iris_context *ice,
                             struct iris_resource *res,
                             uint32_t start_level, uint32_t num_levels,
                             uint32_t start_layer, uint32_t num_layers,
                             enum isl_aux_usage aux_usage,
                             bool fast_clear_supported)
{
   if (res->aux.usage == ISL_AUX_USAGE_NONE)
      return;

   /* We can't do resolves on the compute engine, so awkwardly, we have to
    * do them on the render batch...
    */
   struct iris_batch *batch = &ice->batches[IRIS_BATCH_RENDER];

   const uint32_t clamped_levels =
      miptree_level_range_length(res, start_level, num_levels);
   for (uint32_t l = 0; l < clamped_levels; l++) {
      const uint32_t level = start_level + l;
      const uint32_t level_layers =
         miptree_layer_range_length(res, level, start_layer, num_layers);
      for (uint32_t a = 0; a < level_layers; a++) {
         const uint32_t layer = start_layer + a;
         const enum isl_aux_state aux_state =
            iris_resource_get_aux_state(res, level, layer);
         const enum isl_aux_op aux_op =
            isl_aux_prepare_access(aux_state, aux_usage, fast_clear_supported);

         /* Prepare the aux buffer for a conditional or unconditional access.
          * A conditional access is handled by assuming that the access will
          * not evaluate to a no-op. If the access does in fact occur, the aux
          * will be in the required state. If it does not, no data is lost
          * because the aux_op performed is lossless.
          */
         if (aux_op == ISL_AUX_OP_NONE) {
            /* Nothing to do here. */
         } else if (isl_aux_usage_has_mcs(res->aux.usage)) {
            assert(aux_op == ISL_AUX_OP_PARTIAL_RESOLVE);
            iris_mcs_partial_resolve(ice, batch, res, layer, 1);
         } else if (isl_aux_usage_has_hiz(res->aux.usage)) {
            iris_hiz_exec(ice, batch, res, level, layer, 1, aux_op, false);
         } else if (res->aux.usage == ISL_AUX_USAGE_STC_CCS) {
            unreachable("iris doesn't resolve STC_CCS resources");
         } else {
            assert(isl_aux_usage_has_ccs(res->aux.usage));
            iris_resolve_color(ice, batch, res, level, layer, aux_op);
         }

         const enum isl_aux_state new_state =
            isl_aux_state_transition_aux_op(aux_state, res->aux.usage, aux_op);
         iris_resource_set_aux_state(ice, res, level, layer, 1, new_state);
      }
   }
}

void
iris_resource_finish_write(struct iris_context *ice,
                           struct iris_resource *res, uint32_t level,
                           uint32_t start_layer, uint32_t num_layers,
                           enum isl_aux_usage aux_usage)
{
   if (res->aux.usage == ISL_AUX_USAGE_NONE)
      return;

   const uint32_t level_layers =
      miptree_layer_range_length(res, level, start_layer, num_layers);

   for (uint32_t a = 0; a < level_layers; a++) {
      const uint32_t layer = start_layer + a;
      const enum isl_aux_state aux_state =
         iris_resource_get_aux_state(res, level, layer);

      /* Transition the aux state for a conditional or unconditional write. A
       * conditional write is handled by assuming that the write applies to
       * only part of the render target. This prevents the new state from
       * losing the types of compression that might exist in the current state
       * (e.g. CLEAR). If the write evaluates to a no-op, the state will still
       * be able to communicate when resolves are necessary (but it may
       * falsely communicate this as well).
       */
      const enum isl_aux_state new_aux_state =
         isl_aux_state_transition_write(aux_state, aux_usage, false);

      iris_resource_set_aux_state(ice, res, level, layer, 1, new_aux_state);
   }
}

enum isl_aux_state
iris_resource_get_aux_state(const struct iris_resource *res,
                            uint32_t level, uint32_t layer)
{
   iris_resource_check_level_layer(res, level, layer);

   if (res->surf.usage & ISL_SURF_USAGE_DEPTH_BIT) {
      assert(isl_aux_usage_has_hiz(res->aux.usage));
   } else {
      assert(res->surf.samples == 1 ||
             res->surf.msaa_layout == ISL_MSAA_LAYOUT_ARRAY);
   }

   return res->aux.state[level][layer];
}

void
iris_resource_set_aux_state(struct iris_context *ice,
                            struct iris_resource *res, uint32_t level,
                            uint32_t start_layer, uint32_t num_layers,
                            enum isl_aux_state aux_state)
{
   num_layers = miptree_layer_range_length(res, level, start_layer,
                                           num_layers);

   if (res->surf.usage & ISL_SURF_USAGE_DEPTH_BIT) {
      assert(iris_resource_level_has_hiz(res, level) ||
             !isl_aux_state_has_valid_aux(aux_state));
   } else {
      assert(res->surf.samples == 1 ||
             res->surf.msaa_layout == ISL_MSAA_LAYOUT_ARRAY);
   }

   for (unsigned a = 0; a < num_layers; a++) {
      if (res->aux.state[level][start_layer + a] != aux_state) {
         res->aux.state[level][start_layer + a] = aux_state;
         /* XXX: Need to track which bindings to make dirty */
         ice->state.dirty |= IRIS_DIRTY_RENDER_BUFFER |
                             IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES |
                             IRIS_DIRTY_COMPUTE_RESOLVES_AND_FLUSHES;
         ice->state.stage_dirty |= IRIS_ALL_STAGE_DIRTY_BINDINGS;
      }
   }

   if (res->mod_info && !res->mod_info->supports_clear_color) {
      assert(res->mod_info->aux_usage != ISL_AUX_USAGE_NONE);
      if (aux_state == ISL_AUX_STATE_CLEAR ||
          aux_state == ISL_AUX_STATE_COMPRESSED_CLEAR ||
          aux_state == ISL_AUX_STATE_PARTIAL_CLEAR) {
         iris_mark_dirty_dmabuf(ice, &res->base.b);
      }
   }
}

enum isl_aux_usage
iris_resource_texture_aux_usage(struct iris_context *ice,
                                const struct iris_resource *res,
                                enum isl_format view_format)
{
   struct iris_screen *screen = (void *) ice->ctx.screen;
   struct intel_device_info *devinfo = &screen->devinfo;

   switch (res->aux.usage) {
   case ISL_AUX_USAGE_HIZ:
   case ISL_AUX_USAGE_HIZ_CCS:
   case ISL_AUX_USAGE_HIZ_CCS_WT:
      assert(res->surf.format == view_format);
      return iris_sample_with_depth_aux(devinfo, res) ?
             res->aux.usage : ISL_AUX_USAGE_NONE;

   case ISL_AUX_USAGE_MCS:
   case ISL_AUX_USAGE_MCS_CCS:
   case ISL_AUX_USAGE_STC_CCS:
   case ISL_AUX_USAGE_MC:
      return res->aux.usage;

   case ISL_AUX_USAGE_CCS_E:
   case ISL_AUX_USAGE_GFX12_CCS_E:
      /* If we don't have any unresolved color, report an aux usage of
       * ISL_AUX_USAGE_NONE. This way, texturing won't even look at the
       * aux surface and we can save some bandwidth.
       */
      if (!iris_has_invalid_primary(res, 0, INTEL_REMAINING_LEVELS,
                                    0, INTEL_REMAINING_LAYERS))
         return ISL_AUX_USAGE_NONE;

      /* On Gfx9 color buffers may be compressed by the hardware (lossless
       * compression). There are, however, format restrictions and care needs
       * to be taken that the sampler engine is capable of re-interpreting a
       * buffer with a format different from the one the buffer was
       * originally written with.
       *
       * For example, SRGB formats are not compressible and the sampler
       * engine isn't capable of treating RGBA_UNORM as SRGB_ALPHA. In such a
       * case the underlying color buffer needs to be resolved so that the
       * sampling surface can be sampled as non-compressed (i.e., without the
       * auxiliary MCS buffer being set).
       */
      if (isl_formats_are_ccs_e_compatible(devinfo, res->surf.format,
                                           view_format))
         return res->aux.usage;
      break;

   default:
      break;
   }

   return ISL_AUX_USAGE_NONE;
}

enum isl_aux_usage
iris_image_view_aux_usage(struct iris_context *ice,
                          const struct pipe_image_view *pview,
                          const struct shader_info *info)
{
   if (!info)
      return ISL_AUX_USAGE_NONE;

   const struct iris_screen *screen = (void *) ice->ctx.screen;
   const struct intel_device_info *devinfo = &screen->devinfo;
   struct iris_resource *res = (void *) pview->resource;

   enum isl_format view_format = iris_image_view_get_format(ice, pview);
   enum isl_aux_usage aux_usage =
      iris_resource_texture_aux_usage(ice, res, view_format);

   bool uses_atomic_load_store =
      ice->shaders.uncompiled[info->stage]->uses_atomic_load_store;

   /* On Gfx12, compressed surfaces support non-atomic operations. GFX12HP
    * and later add support for all operations, including atomics.
    */
   if (aux_usage == ISL_AUX_USAGE_GFX12_CCS_E &&
       (devinfo->verx10 >= 125 || !uses_atomic_load_store))
      return ISL_AUX_USAGE_GFX12_CCS_E;

   return ISL_AUX_USAGE_NONE;
}

bool
iris_can_sample_mcs_with_clear(const struct intel_device_info *devinfo,
                               const struct iris_resource *res)
{
   assert(isl_aux_usage_has_mcs(res->aux.usage));

   /* On TGL, the sampler has an issue with some 8 and 16bpp MSAA fast
    * clears. See HSD 1707282275, wa_14013111325. Due to the use of
    * format-reinterpretation, a simplified workaround is implemented.
    */
   if (devinfo->ver >= 12 &&
       isl_format_get_layout(res->surf.format)->bpb <= 16) {
      return false;
   }

   return true;
}

static bool
isl_formats_are_fast_clear_compatible(enum isl_format a, enum isl_format b)
{
   /* On gfx8 and earlier, the hardware was only capable of handling 0/1 clear
    * values so sRGB curve application was a no-op for all fast-clearable
    * formats.
    *
    * On gfx9+, the hardware supports arbitrary clear values. For sRGB clear
    * values, the hardware interprets the floats, not as what would be
    * returned from the sampler (or written by the shader), but as being
    * between format conversion and sRGB curve application. This means that
    * we can switch between sRGB and UNORM without having to whack the clear
    * color.
    */
   return isl_format_srgb_to_linear(a) == isl_format_srgb_to_linear(b);
}

void
iris_resource_prepare_texture(struct iris_context *ice,
                              struct iris_resource *res,
                              enum isl_format view_format,
                              uint32_t start_level, uint32_t num_levels,
                              uint32_t start_layer, uint32_t num_layers)
{
   const struct iris_screen *screen = (void *) ice->ctx.screen;
   const struct intel_device_info *devinfo = &screen->devinfo;

   enum isl_aux_usage aux_usage =
      iris_resource_texture_aux_usage(ice, res, view_format);

   bool clear_supported = isl_aux_usage_has_fast_clears(aux_usage);

   /* Clear color is specified as ints or floats and the conversion is done
    * by the sampler. If we have a texture view, we would have to perform
    * the clear color conversion manually. Just disable clear color.
    */
   if (!isl_formats_are_fast_clear_compatible(res->surf.format, view_format))
      clear_supported = false;

   if (isl_aux_usage_has_mcs(aux_usage) &&
       !iris_can_sample_mcs_with_clear(devinfo, res)) {
      clear_supported = false;
   }

   iris_resource_prepare_access(ice, res, start_level, num_levels,
                                start_layer, num_layers,
                                aux_usage, clear_supported);
}

/* Whether or not rendering a color value with either format results in the
 * same pixel. This can return false negatives.
 */
bool
iris_render_formats_color_compatible(enum isl_format a, enum isl_format b,
                                     union isl_color_value color,
                                     bool clear_color_unknown)
{
   if (a == b)
      return true;

   /* A difference in color space doesn't matter for 0/1 values. */
   if (!clear_color_unknown &&
       isl_format_srgb_to_linear(a) == isl_format_srgb_to_linear(b) &&
       isl_color_value_is_zero_one(color, a)) {
      return true;
   }

   return false;
}

enum isl_aux_usage
iris_resource_render_aux_usage(struct iris_context *ice,
                               struct iris_resource *res, uint32_t level,
                               enum isl_format render_format,
                               bool draw_aux_disabled)
{
   struct iris_screen *screen = (void *) ice->ctx.screen;
   struct intel_device_info *devinfo = &screen->devinfo;

   if (draw_aux_disabled)
      return ISL_AUX_USAGE_NONE;

   switch (res->aux.usage) {
   case ISL_AUX_USAGE_HIZ:
   case ISL_AUX_USAGE_HIZ_CCS:
   case ISL_AUX_USAGE_HIZ_CCS_WT:
      assert(render_format == res->surf.format);
      return iris_resource_level_has_hiz(res, level) ?
             res->aux.usage : ISL_AUX_USAGE_NONE;

   case ISL_AUX_USAGE_STC_CCS:
      assert(render_format == res->surf.format);
      return res->aux.usage;

   case ISL_AUX_USAGE_MCS:
   case ISL_AUX_USAGE_MCS_CCS:
      return res->aux.usage;

   case ISL_AUX_USAGE_CCS_D:
   case ISL_AUX_USAGE_CCS_E:
   case ISL_AUX_USAGE_GFX12_CCS_E:
      /* Disable CCS for some cases of texture-view rendering. On gfx12, HW
       * may convert some subregions of shader output to fast-cleared blocks
       * if CCS is enabled and the shader output matches the clear color.
       * Existing fast-cleared blocks are correctly interpreted by the clear
       * color and the resource format (see can_fast_clear_color). To avoid
       * gaining new fast-cleared blocks that can't be interpreted by the
       * resource format (and to avoid misinterpreting existing ones), shut
       * off CCS when the interpretation of the clear color differs between
       * the render_format and the resource format.
       */
      if (!iris_render_formats_color_compatible(render_format,
                                                res->surf.format,
                                                res->aux.clear_color,
                                                res->aux.clear_color_unknown)) {
         return ISL_AUX_USAGE_NONE;
      }

      if (res->aux.usage == ISL_AUX_USAGE_CCS_D)
         return ISL_AUX_USAGE_CCS_D;

      if (isl_formats_are_ccs_e_compatible(devinfo, res->surf.format,
                                           render_format)) {
         return res->aux.usage;
      }
      FALLTHROUGH;

   default:
      return ISL_AUX_USAGE_NONE;
   }
}

void
iris_resource_prepare_render(struct iris_context *ice,
                             struct iris_resource *res, uint32_t level,
                             uint32_t start_layer, uint32_t layer_count,
                             enum isl_aux_usage aux_usage)
{
   iris_resource_prepare_access(ice, res, level, 1, start_layer,
                                layer_count, aux_usage,
                                isl_aux_usage_has_fast_clears(aux_usage));
}

void
iris_resource_finish_render(struct iris_context *ice,
                            struct iris_resource *res, uint32_t level,
                            uint32_t start_layer, uint32_t layer_count,
                            enum isl_aux_usage aux_usage)
{
   iris_resource_finish_write(ice, res, level, start_layer, layer_count,
                              aux_usage);
}
1134