1 /*
2  * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
3  * Copyright 2018 Advanced Micro Devices, Inc.
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * on the rights to use, copy, modify, merge, publish, distribute, sub
10  * license, and/or sell copies of the Software, and to permit persons to whom
11  * the Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice (including the next
14  * paragraph) shall be included in all copies or substantial portions of the
15  * Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
20  * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
21  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
22  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
23  * USE OR OTHER DEALINGS IN THE SOFTWARE.
24  */
25 
26 #include "si_build_pm4.h"
27 #include "si_pipe.h"
28 #include "sid.h"
29 #include "util/os_time.h"
30 #include "util/u_log.h"
31 #include "util/u_upload_mgr.h"
32 #include "ac_debug.h"
33 
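/* Flush the current gfx IB: wait for outstanding work where the hardware or
 * kernel requires it, emit end-of-IB workarounds, submit the IB through the
 * winsys, and start a new one.
 */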
34 void si_flush_gfx_cs(struct si_context *ctx, unsigned flags, struct pipe_fence_handle **fence)
35 {
36    struct radeon_cmdbuf *cs = &ctx->gfx_cs;
37    struct radeon_winsys *ws = ctx->ws;
38    struct si_screen *sscreen = ctx->screen;
39    const unsigned wait_ps_cs = SI_CONTEXT_PS_PARTIAL_FLUSH | SI_CONTEXT_CS_PARTIAL_FLUSH;
40    unsigned wait_flags = 0;
41 
42    if (ctx->gfx_flush_in_progress)
43       return;
44 
45    /* The amdgpu kernel driver synchronizes execution for shared DMABUFs between
46     * processes on DRM >= 3.39.0, so we don't have to wait at the end of IBs to
47     * make sure everything is idle.
48     *
49     * The amdgpu winsys synchronizes execution for buffers shared by different
50     * contexts within the same process.
51     *
52     * Interop with AMDVLK, RADV, or OpenCL within the same process requires
53     * explicit fences or glFinish.
54     */
55    if (sscreen->info.is_amdgpu && sscreen->info.drm_minor >= 39)
56       flags |= RADEON_FLUSH_START_NEXT_GFX_IB_NOW;
57 
58    if (ctx->gfx_level == GFX6) {
59       /* The kernel flushes L2 before shaders are finished. */
60       wait_flags |= wait_ps_cs;
61    } else if (!(flags & RADEON_FLUSH_START_NEXT_GFX_IB_NOW) ||
62               ((flags & RADEON_FLUSH_TOGGLE_SECURE_SUBMISSION) &&
63                 !ws->cs_is_secure(cs))) {
64       /* TODO: this workaround fixes subtitle rendering with mpv -vo=vaapi and
65        * TMZ, but it shouldn't be necessary.
66        */
67       wait_flags |= wait_ps_cs;
68    }
69 
70    /* Drop this flush if it's a no-op. */
71    if (!radeon_emitted(cs, ctx->initial_gfx_cs_size) &&
72        (!wait_flags || !ctx->gfx_last_ib_is_busy) &&
73        !(flags & RADEON_FLUSH_TOGGLE_SECURE_SUBMISSION)) {
74       tc_driver_internal_flush_notify(ctx->tc);
75       return;
76    }
77 
78    /* Non-aux contexts must set up no-op API dispatch on GPU resets. This is
79     * similar to si_get_reset_status but here we can ignore soft-recoveries,
80     * while si_get_reset_status can't. */
81    if (!(ctx->context_flags & SI_CONTEXT_FLAG_AUX) &&
82        ctx->device_reset_callback.reset) {
83       enum pipe_reset_status status = ctx->ws->ctx_query_reset_status(ctx->ctx, true, NULL);
84       if (status != PIPE_NO_RESET)
85          ctx->device_reset_callback.reset(ctx->device_reset_callback.data, status);
86    }
87 
88    if (sscreen->debug_flags & DBG(CHECK_VM))
89       flags &= ~PIPE_FLUSH_ASYNC;
90 
91    ctx->gfx_flush_in_progress = true;
92 
93    if (ctx->has_graphics) {
94       if (!list_is_empty(&ctx->active_queries))
95          si_suspend_queries(ctx);
96 
97       ctx->streamout.suspended = false;
98       if (ctx->streamout.begin_emitted) {
99          si_emit_streamout_end(ctx);
100          ctx->streamout.suspended = true;
101 
102          /* Since NGG streamout uses GDS, we need to make GDS
103           * idle when we leave the IB, otherwise another process
104           * might overwrite it while our shaders are busy.
105           */
106          if (sscreen->use_ngg_streamout)
107             wait_flags |= SI_CONTEXT_PS_PARTIAL_FLUSH;
108       }
109    }
110 
111    /* Make sure CP DMA is idle at the end of IBs after L2 prefetches
112     * because the kernel doesn't wait for it. */
113    if (ctx->gfx_level >= GFX7)
114       si_cp_dma_wait_for_idle(ctx, &ctx->gfx_cs);
115 
116    /* If we use s_sendmsg to set tess factors to all 0 or all 1 instead of writing to the tess
117     * factor buffer, we need this at the end of command buffers:
118     */
119    if (ctx->gfx_level == GFX11 && ctx->tess_rings) {
120       radeon_begin(cs);
121       radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
122       radeon_emit(EVENT_TYPE(V_028A90_SQ_NON_EVENT) | EVENT_INDEX(0));
123       radeon_end();
124    }
125 
126    /* Wait for draw calls to finish if needed. */
127    if (wait_flags) {
128       ctx->flags |= wait_flags;
129       ctx->emit_cache_flush(ctx, &ctx->gfx_cs);
130    }
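   /* Mark the IB busy unless both PS and CS partial flushes were included in wait_flags above. */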
131    ctx->gfx_last_ib_is_busy = (wait_flags & wait_ps_cs) != wait_ps_cs;
132 
133    if (ctx->current_saved_cs) {
134       si_trace_emit(ctx);
135 
136       /* Save the IB for debug contexts. */
137       si_save_cs(ws, cs, &ctx->current_saved_cs->gfx, true);
138       ctx->current_saved_cs->flushed = true;
139       ctx->current_saved_cs->time_flush = os_time_get_nano();
140 
141       si_log_hw_flush(ctx);
142    }
143 
144    if (sscreen->debug_flags & DBG(IB))
145       si_print_current_ib(ctx, stderr);
146 
147    if (ctx->is_noop)
148       flags |= RADEON_FLUSH_NOOP;
149 
150    /* Flush the CS. */
151    ws->cs_flush(cs, flags, &ctx->last_gfx_fence);
152 
153    tc_driver_internal_flush_notify(ctx->tc);
154    if (fence)
155       ws->fence_reference(fence, ctx->last_gfx_fence);
156 
157    ctx->num_gfx_cs_flushes++;
158 
159    /* Check VM faults if needed. */
160    if (sscreen->debug_flags & DBG(CHECK_VM)) {
161       /* Use a conservative timeout of 800 ms, after which we stop
162        * waiting and assume the GPU is hung.
163        */
164       ctx->ws->fence_wait(ctx->ws, ctx->last_gfx_fence, 800 * 1000 * 1000);
165 
166       si_check_vm_faults(ctx, &ctx->current_saved_cs->gfx, AMD_IP_GFX);
167    }
168 
169    if (unlikely(ctx->thread_trace &&
170                 (flags & PIPE_FLUSH_END_OF_FRAME))) {
171       si_handle_thread_trace(ctx, &ctx->gfx_cs);
172    }
173 
174    if (ctx->current_saved_cs)
175       si_saved_cs_reference(&ctx->current_saved_cs, NULL);
176 
177    si_begin_new_gfx_cs(ctx, false);
178    ctx->gfx_flush_in_progress = false;
179 }
180 
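/* For debug contexts: allocate a saved-CS record with a small trace buffer,
 * zero it, and emit an initial trace point so the IB and its trace IDs can be
 * inspected later.
 */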
181 static void si_begin_gfx_cs_debug(struct si_context *ctx)
182 {
183    static const uint32_t zeros[1];
184    assert(!ctx->current_saved_cs);
185 
186    ctx->current_saved_cs = calloc(1, sizeof(*ctx->current_saved_cs));
187    if (!ctx->current_saved_cs)
188       return;
189 
190    pipe_reference_init(&ctx->current_saved_cs->reference, 1);
191 
192    ctx->current_saved_cs->trace_buf =
193       si_resource(pipe_buffer_create(ctx->b.screen, 0, PIPE_USAGE_STAGING, 4));
194    if (!ctx->current_saved_cs->trace_buf) {
195       free(ctx->current_saved_cs);
196       ctx->current_saved_cs = NULL;
197       return;
198    }
199 
200    pipe_buffer_write_nooverlap(&ctx->b, &ctx->current_saved_cs->trace_buf->b.b, 0, sizeof(zeros),
201                                zeros);
202    ctx->current_saved_cs->trace_id = 0;
203 
204    si_trace_emit(ctx);
205 
206    radeon_add_to_buffer_list(ctx, &ctx->gfx_cs, ctx->current_saved_cs->trace_buf,
207                              RADEON_USAGE_READWRITE | RADEON_PRIO_FENCE_TRACE);
208 }
209 
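/* Add the global GDS and GDS OA buffers to the current IB's buffer list, if
 * they have been allocated.
 */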
210 static void si_add_gds_to_buffer_list(struct si_context *sctx)
211 {
212    if (sctx->screen->gds) {
213       sctx->ws->cs_add_buffer(&sctx->gfx_cs, sctx->screen->gds, RADEON_USAGE_READWRITE, 0);
214       if (sctx->screen->gds_oa) {
215          sctx->ws->cs_add_buffer(&sctx->gfx_cs, sctx->screen->gds_oa, RADEON_USAGE_READWRITE, 0);
216       }
217    }
218 }
219 
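/* Allocate the screen-wide GDS and OA buffers on demand (required by NGG
 * streamout) and add them to the current IB's buffer list.
 */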
220 void si_allocate_gds(struct si_context *sctx)
221 {
222    struct radeon_winsys *ws = sctx->ws;
223 
224    if (sctx->screen->gds && sctx->screen->gds_oa)
225       return;
226 
227    assert(sctx->screen->use_ngg_streamout);
228 
229    /* We need 256B (64 dw) of GDS, otherwise streamout hangs. */
230    simple_mtx_lock(&sctx->screen->gds_mutex);
231    if (!sctx->screen->gds)
232       sctx->screen->gds = ws->buffer_create(ws, 256, 4, RADEON_DOMAIN_GDS, RADEON_FLAG_DRIVER_INTERNAL);
233    if (!sctx->screen->gds_oa)
234       sctx->screen->gds_oa = ws->buffer_create(ws, 1, 1, RADEON_DOMAIN_OA, RADEON_FLAG_DRIVER_INTERNAL);
235    simple_mtx_unlock(&sctx->screen->gds_mutex);
236 
237    assert(sctx->screen->gds && sctx->screen->gds_oa);
238 
239    si_add_gds_to_buffer_list(sctx);
240 }
241 
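/* Seed the register tracking table with the values programmed by CLEAR_STATE,
 * so that redundant context register writes can be skipped afterwards.
 */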
242 void si_set_tracked_regs_to_clear_state(struct si_context *ctx)
243 {
244    STATIC_ASSERT(SI_NUM_TRACKED_REGS <= sizeof(ctx->tracked_regs.reg_saved) * 8);
245 
246    ctx->tracked_regs.reg_value[SI_TRACKED_DB_RENDER_CONTROL] = 0x00000000;
247    ctx->tracked_regs.reg_value[SI_TRACKED_DB_COUNT_CONTROL] = 0x00000000;
248    ctx->tracked_regs.reg_value[SI_TRACKED_DB_RENDER_OVERRIDE2] = 0x00000000;
249    ctx->tracked_regs.reg_value[SI_TRACKED_DB_SHADER_CONTROL] = 0x00000000;
250    ctx->tracked_regs.reg_value[SI_TRACKED_CB_TARGET_MASK] = 0xffffffff;
251    ctx->tracked_regs.reg_value[SI_TRACKED_CB_DCC_CONTROL] = 0x00000000;
252    ctx->tracked_regs.reg_value[SI_TRACKED_SX_PS_DOWNCONVERT] = 0x00000000;
253    ctx->tracked_regs.reg_value[SI_TRACKED_SX_BLEND_OPT_EPSILON] = 0x00000000;
254    ctx->tracked_regs.reg_value[SI_TRACKED_SX_BLEND_OPT_CONTROL] = 0x00000000;
255    ctx->tracked_regs.reg_value[SI_TRACKED_PA_SC_LINE_CNTL] = 0x00001000;
256    ctx->tracked_regs.reg_value[SI_TRACKED_PA_SC_AA_CONFIG] = 0x00000000;
257    ctx->tracked_regs.reg_value[SI_TRACKED_DB_EQAA] = 0x00000000;
258    ctx->tracked_regs.reg_value[SI_TRACKED_PA_SC_MODE_CNTL_1] = 0x00000000;
259    ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_PRIM_FILTER_CNTL] = 0;
260    ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_SMALL_PRIM_FILTER_CNTL] = 0x00000000;
261    ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_VS_OUT_CNTL] = 0x00000000;
262    ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_CLIP_CNTL] = 0x00090000;
263    ctx->tracked_regs.reg_value[SI_TRACKED_PA_SC_BINNER_CNTL_0] = 0x00000003;
264    ctx->tracked_regs.reg_value[SI_TRACKED_DB_PA_SC_VRS_OVERRIDE_CNTL] = 0x00000000;
265    ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_GB_VERT_CLIP_ADJ] = 0x3f800000;
266    ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_GB_VERT_DISC_ADJ] = 0x3f800000;
267    ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_GB_HORZ_CLIP_ADJ] = 0x3f800000;
268    ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_GB_HORZ_DISC_ADJ] = 0x3f800000;
269    ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_HARDWARE_SCREEN_OFFSET] = 0;
270    ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_VTX_CNTL] = 0x00000005;
271    ctx->tracked_regs.reg_value[SI_TRACKED_PA_SC_CLIPRECT_RULE] = 0xffff;
272    ctx->tracked_regs.reg_value[SI_TRACKED_PA_SC_LINE_STIPPLE] = 0;
273    ctx->tracked_regs.reg_value[SI_TRACKED_VGT_ESGS_RING_ITEMSIZE]  = 0x00000000;
274    ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GSVS_RING_OFFSET_1]  = 0x00000000;
275    ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GSVS_RING_OFFSET_2]  = 0x00000000;
276    ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GSVS_RING_OFFSET_3]  = 0x00000000;
277    ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GSVS_RING_ITEMSIZE]  = 0x00000000;
278    ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_MAX_VERT_OUT]  = 0x00000000;
279    ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_VERT_ITEMSIZE]  = 0x00000000;
280    ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_VERT_ITEMSIZE_1]  = 0x00000000;
281    ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_VERT_ITEMSIZE_2]  = 0x00000000;
282    ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_VERT_ITEMSIZE_3]  = 0x00000000;
283    ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_INSTANCE_CNT]  = 0x00000000;
284    ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_ONCHIP_CNTL]  = 0x00000000;
285    ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_MAX_PRIMS_PER_SUBGROUP]  = 0x00000000;
286    ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_MODE]  = 0x00000000;
287    ctx->tracked_regs.reg_value[SI_TRACKED_VGT_PRIMITIVEID_EN]  = 0x00000000;
288    ctx->tracked_regs.reg_value[SI_TRACKED_VGT_REUSE_OFF]  = 0x00000000;
289    ctx->tracked_regs.reg_value[SI_TRACKED_SPI_VS_OUT_CONFIG]  = 0x00000000;
290    ctx->tracked_regs.reg_value[SI_TRACKED_GE_MAX_OUTPUT_PER_SUBGROUP]  = 0x00000000;
291    ctx->tracked_regs.reg_value[SI_TRACKED_GE_NGG_SUBGRP_CNTL]  = 0x00000000;
292    ctx->tracked_regs.reg_value[SI_TRACKED_SPI_SHADER_IDX_FORMAT]  = 0x00000000;
293    ctx->tracked_regs.reg_value[SI_TRACKED_SPI_SHADER_POS_FORMAT]  = 0x00000000;
294    ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_VTE_CNTL]  = 0x00000000;
295    ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_NGG_CNTL]  = 0x00000000;
296    ctx->tracked_regs.reg_value[SI_TRACKED_SPI_PS_INPUT_ENA]  = 0x00000000;
297    ctx->tracked_regs.reg_value[SI_TRACKED_SPI_PS_INPUT_ADDR]  = 0x00000000;
298    ctx->tracked_regs.reg_value[SI_TRACKED_SPI_BARYC_CNTL]  = 0x00000000;
299    ctx->tracked_regs.reg_value[SI_TRACKED_SPI_PS_IN_CONTROL]  = 0x00000002;
300    ctx->tracked_regs.reg_value[SI_TRACKED_SPI_SHADER_Z_FORMAT]  = 0x00000000;
301    ctx->tracked_regs.reg_value[SI_TRACKED_SPI_SHADER_COL_FORMAT]  = 0x00000000;
302    ctx->tracked_regs.reg_value[SI_TRACKED_CB_SHADER_MASK]  = 0xffffffff;
303    ctx->tracked_regs.reg_value[SI_TRACKED_VGT_TF_PARAM]  = 0x00000000;
304    ctx->tracked_regs.reg_value[SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL]  = 0x0000001e; /* From GFX8 */
305 
306    /* Set all cleared context registers to saved. */
307    ctx->tracked_regs.reg_saved = BITFIELD64_MASK(SI_TRACKED_GE_PC_ALLOC);
308 
309    if (ctx->gfx_level >= GFX11)
310       ctx->last_gs_out_prim = -1; /* uconfig register, unknown value */
311    else
312       ctx->last_gs_out_prim = 0; /* context register cleared by CLEAR_STATE */
313 }
314 
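/* Install or remove a wrapper around the draw_vbo/draw_vertex_state entry
 * points. The original callbacks are saved in real_draw_vbo and
 * real_draw_vertex_state so the wrapper can chain to them.
 */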
315 void si_install_draw_wrapper(struct si_context *sctx, pipe_draw_vbo_func wrapper,
316                              pipe_draw_vertex_state_func vstate_wrapper)
317 {
318    if (wrapper) {
319       if (wrapper != sctx->b.draw_vbo) {
320          assert(!sctx->real_draw_vbo);
321          assert(!sctx->real_draw_vertex_state);
322          sctx->real_draw_vbo = sctx->b.draw_vbo;
323          sctx->real_draw_vertex_state = sctx->b.draw_vertex_state;
324          sctx->b.draw_vbo = wrapper;
325          sctx->b.draw_vertex_state = vstate_wrapper;
326       }
327    } else if (sctx->real_draw_vbo) {
328       sctx->real_draw_vbo = NULL;
329       sctx->real_draw_vertex_state = NULL;
330       si_select_draw_vbo(sctx);
331    }
332 }
333 
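/* If the currently bound resources require a different TMZ (secure) state than
 * the current IB, flush and toggle secure submission.
 */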
334 static void si_tmz_preamble(struct si_context *sctx)
335 {
336    bool secure = si_gfx_resources_check_encrypted(sctx);
337    if (secure != sctx->ws->cs_is_secure(&sctx->gfx_cs)) {
338       si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW |
339                             RADEON_FLUSH_TOGGLE_SECURE_SUBMISSION, NULL);
340    }
341 }
342 
343 static void si_draw_vbo_tmz_preamble(struct pipe_context *ctx,
344                                      const struct pipe_draw_info *info,
345                                      unsigned drawid_offset,
346                                      const struct pipe_draw_indirect_info *indirect,
347                                      const struct pipe_draw_start_count_bias *draws,
348                                      unsigned num_draws) {
349    struct si_context *sctx = (struct si_context *)ctx;
350 
351    si_tmz_preamble(sctx);
352    sctx->real_draw_vbo(ctx, info, drawid_offset, indirect, draws, num_draws);
353 }
354 
355 static void si_draw_vstate_tmz_preamble(struct pipe_context *ctx,
356                                         struct pipe_vertex_state *state,
357                                         uint32_t partial_velem_mask,
358                                         struct pipe_draw_vertex_state_info info,
359                                         const struct pipe_draw_start_count_bias *draws,
360                                         unsigned num_draws) {
361    struct si_context *sctx = (struct si_context *)ctx;
362 
363    si_tmz_preamble(sctx);
364    sctx->real_draw_vertex_state(ctx, state, partial_velem_mask, info, draws, num_draws);
365 }
366 
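/* Initialize a newly started gfx IB: re-add persistent buffers to the buffer
 * list, invalidate caches, and mark states dirty so that everything is
 * re-emitted before the first draw.
 */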
367 void si_begin_new_gfx_cs(struct si_context *ctx, bool first_cs)
368 {
369    bool is_secure = false;
370 
371    if (unlikely(radeon_uses_secure_bos(ctx->ws))) {
372       is_secure = ctx->ws->cs_is_secure(&ctx->gfx_cs);
373 
374       si_install_draw_wrapper(ctx, si_draw_vbo_tmz_preamble,
375                               si_draw_vstate_tmz_preamble);
376    }
377 
378    if (ctx->is_debug)
379       si_begin_gfx_cs_debug(ctx);
380 
381    si_add_gds_to_buffer_list(ctx);
382 
383    /* Always invalidate caches at the beginning of IBs, because external
384     * users (e.g. BO evictions and SDMA/UVD/VCE IBs) can modify our
385     * buffers.
386     *
387     * Note that the cache flush done by the kernel at the end of GFX IBs
388     * isn't useful here, because that flush can finish after the following
389     * IB starts drawing.
390     *
391     * TODO: Do we also need to invalidate CB & DB caches?
392     */
393    ctx->flags |= SI_CONTEXT_INV_ICACHE | SI_CONTEXT_INV_SCACHE | SI_CONTEXT_INV_VCACHE |
394                  SI_CONTEXT_INV_L2 | SI_CONTEXT_START_PIPELINE_STATS;
395    ctx->pipeline_stats_enabled = -1;
396 
397    /* We don't know if the last draw used NGG because it can be a different process.
398     * When switching NGG->legacy, we need to flush VGT for certain hw generations.
399     */
400    if (ctx->screen->info.has_vgt_flush_ngg_legacy_bug && !ctx->ngg)
401       ctx->flags |= SI_CONTEXT_VGT_FLUSH;
402 
403    if (ctx->screen->attribute_ring) {
404       radeon_add_to_buffer_list(ctx, &ctx->gfx_cs, ctx->screen->attribute_ring,
405                                 RADEON_USAGE_READWRITE | RADEON_PRIO_SHADER_RINGS);
406    }
407    if (ctx->border_color_buffer) {
408       radeon_add_to_buffer_list(ctx, &ctx->gfx_cs, ctx->border_color_buffer,
409                                 RADEON_USAGE_READ | RADEON_PRIO_BORDER_COLORS);
410    }
411    if (ctx->shadowed_regs) {
412       radeon_add_to_buffer_list(ctx, &ctx->gfx_cs, ctx->shadowed_regs,
413                                 RADEON_USAGE_READWRITE | RADEON_PRIO_DESCRIPTORS);
414    }
415 
416    si_add_all_descriptors_to_bo_list(ctx);
417 
418    if (first_cs || !ctx->shadowed_regs) {
419       si_shader_pointers_mark_dirty(ctx);
420       ctx->cs_shader_state.initialized = false;
421    }
422 
423    if (!ctx->has_graphics) {
424       ctx->initial_gfx_cs_size = ctx->gfx_cs.current.cdw;
425       return;
426    }
427 
428    if (ctx->tess_rings) {
429       radeon_add_to_buffer_list(ctx, &ctx->gfx_cs,
430                                 unlikely(is_secure) ? si_resource(ctx->tess_rings_tmz) : si_resource(ctx->tess_rings),
431                                 RADEON_USAGE_READWRITE | RADEON_PRIO_SHADER_RINGS);
432    }
433 
434    /* Set all valid groups as dirty so they get re-emitted on the
435     * next draw command.
436     */
437    si_pm4_reset_emitted(ctx, first_cs);
438 
439    /* The CS initialization should be emitted before everything else. */
440    if (ctx->cs_preamble_state) {
441       struct si_pm4_state *preamble = is_secure ? ctx->cs_preamble_state_tmz :
442                                                   ctx->cs_preamble_state;
443       ctx->ws->cs_set_preamble(&ctx->gfx_cs, preamble->pm4, preamble->ndw,
444                                preamble != ctx->last_preamble);
445       ctx->last_preamble = preamble;
446    }
447 
448    if (ctx->queued.named.ls)
449       ctx->prefetch_L2_mask |= SI_PREFETCH_LS;
450    if (ctx->queued.named.hs)
451       ctx->prefetch_L2_mask |= SI_PREFETCH_HS;
452    if (ctx->queued.named.es)
453       ctx->prefetch_L2_mask |= SI_PREFETCH_ES;
454    if (ctx->queued.named.gs)
455       ctx->prefetch_L2_mask |= SI_PREFETCH_GS;
456    if (ctx->queued.named.vs)
457       ctx->prefetch_L2_mask |= SI_PREFETCH_VS;
458    if (ctx->queued.named.ps)
459       ctx->prefetch_L2_mask |= SI_PREFETCH_PS;
460 
461    /* CLEAR_STATE disables all colorbuffers, so only enable bound ones. */
462    bool has_clear_state = ctx->screen->info.has_clear_state;
463    if (has_clear_state || ctx->shadowed_regs) {
464       ctx->framebuffer.dirty_cbufs =
465             u_bit_consecutive(0, ctx->framebuffer.state.nr_cbufs);
466       /* CLEAR_STATE disables the zbuffer, so only enable it if it's bound. */
467       ctx->framebuffer.dirty_zsbuf = ctx->framebuffer.state.zsbuf != NULL;
468    } else {
469       ctx->framebuffer.dirty_cbufs = u_bit_consecutive(0, 8);
470       ctx->framebuffer.dirty_zsbuf = true;
471    }
472 
473    /* Even with shadowed registers, we have to add buffers to the buffer list.
474     * These atoms are the only ones that add buffers.
475     */
476    si_mark_atom_dirty(ctx, &ctx->atoms.s.framebuffer);
477    si_mark_atom_dirty(ctx, &ctx->atoms.s.render_cond);
478    if (ctx->screen->use_ngg_culling)
479       si_mark_atom_dirty(ctx, &ctx->atoms.s.ngg_cull_state);
480 
481    if (first_cs || !ctx->shadowed_regs) {
482       /* These don't add any buffers, so skip them with shadowing. */
483       si_mark_atom_dirty(ctx, &ctx->atoms.s.clip_regs);
484       /* CLEAR_STATE sets zeros. */
485       if (!has_clear_state || ctx->clip_state_any_nonzeros)
486          si_mark_atom_dirty(ctx, &ctx->atoms.s.clip_state);
487       ctx->sample_locs_num_samples = 0;
488       si_mark_atom_dirty(ctx, &ctx->atoms.s.msaa_sample_locs);
489       si_mark_atom_dirty(ctx, &ctx->atoms.s.msaa_config);
490       /* CLEAR_STATE sets 0xffff. */
491       if (!has_clear_state || ctx->sample_mask != 0xffff)
492          si_mark_atom_dirty(ctx, &ctx->atoms.s.sample_mask);
493       si_mark_atom_dirty(ctx, &ctx->atoms.s.cb_render_state);
494       /* CLEAR_STATE sets zeros. */
495       if (!has_clear_state || ctx->blend_color_any_nonzeros)
496          si_mark_atom_dirty(ctx, &ctx->atoms.s.blend_color);
497       si_mark_atom_dirty(ctx, &ctx->atoms.s.db_render_state);
498       if (ctx->gfx_level >= GFX9)
499          si_mark_atom_dirty(ctx, &ctx->atoms.s.dpbb_state);
500       si_mark_atom_dirty(ctx, &ctx->atoms.s.stencil_ref);
501       si_mark_atom_dirty(ctx, &ctx->atoms.s.spi_map);
502       if (!ctx->screen->use_ngg_streamout)
503          si_mark_atom_dirty(ctx, &ctx->atoms.s.streamout_enable);
504       /* CLEAR_STATE disables all window rectangles. */
505       if (!has_clear_state || ctx->num_window_rectangles > 0)
506          si_mark_atom_dirty(ctx, &ctx->atoms.s.window_rectangles);
507       si_mark_atom_dirty(ctx, &ctx->atoms.s.guardband);
508       si_mark_atom_dirty(ctx, &ctx->atoms.s.scissors);
509       si_mark_atom_dirty(ctx, &ctx->atoms.s.viewports);
510 
511       /* Invalidate various draw states so that they are emitted before
512        * the first draw call. */
513       si_invalidate_draw_constants(ctx);
514       ctx->last_index_size = -1;
515       ctx->last_primitive_restart_en = -1;
516       ctx->last_restart_index = SI_RESTART_INDEX_UNKNOWN;
517       ctx->last_prim = -1;
518       ctx->last_multi_vgt_param = -1;
519       ctx->last_vs_state = ~0;
520       ctx->last_gs_state = ~0;
521       ctx->last_ls = NULL;
522       ctx->last_tcs = NULL;
523       ctx->last_tes_sh_base = -1;
524       ctx->last_num_tcs_input_cp = -1;
525       ctx->last_ls_hs_config = -1; /* impossible value */
526 
527       if (has_clear_state) {
528          si_set_tracked_regs_to_clear_state(ctx);
529       } else {
530          /* Set all register values to unknown. */
531          ctx->tracked_regs.reg_saved = 0;
532          ctx->last_gs_out_prim = -1; /* unknown */
533       }
534 
535       /* 0xffffffff is an impossible value for the SPI_PS_INPUT_CNTL_n registers. */
536       memset(ctx->tracked_regs.spi_ps_input_cntl, 0xff, sizeof(uint32_t) * 32);
537    }
538 
539    if (ctx->scratch_buffer) {
540       si_context_add_resource_size(ctx, &ctx->scratch_buffer->b.b);
541       si_mark_atom_dirty(ctx, &ctx->atoms.s.scratch_state);
542    }
543 
544    if (ctx->streamout.suspended) {
545       ctx->streamout.append_bitmask = ctx->streamout.enabled_mask;
546       si_streamout_buffers_dirty(ctx);
547    }
548 
549    if (!list_is_empty(&ctx->active_queries))
550       si_resume_queries(ctx);
551 
552    assert(!ctx->gfx_cs.prev_dw);
553    ctx->initial_gfx_cs_size = ctx->gfx_cs.current.cdw;
554 
555    /* All buffer references are removed on a flush, so si_check_needs_implicit_sync
556     * cannot determine if si_make_CB_shader_coherent() needs to be called.
557     * ctx->force_cb_shader_coherent will be cleared by the first call to
558     * si_make_CB_shader_coherent.
559     */
560    ctx->force_cb_shader_coherent = true;
561 }
562 
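/* Write an incremented trace ID to the trace buffer through the CP and emit a
 * matching NOP trace point into the IB, so IB dumps can show how far execution
 * progressed.
 */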
563 void si_trace_emit(struct si_context *sctx)
564 {
565    struct radeon_cmdbuf *cs = &sctx->gfx_cs;
566    uint32_t trace_id = ++sctx->current_saved_cs->trace_id;
567 
568    si_cp_write_data(sctx, sctx->current_saved_cs->trace_buf, 0, 4, V_370_MEM, V_370_ME, &trace_id);
569 
570    radeon_begin(cs);
571    radeon_emit(PKT3(PKT3_NOP, 0, 0));
572    radeon_emit(AC_ENCODE_TRACE_POINT(trace_id));
573    radeon_end();
574 
575    if (sctx->log)
576       u_log_flush(sctx->log);
577 }
578 
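/* Emit SURFACE_SYNC (or ACQUIRE_MEM on GFX9 and compute IBs) with the given
 * CP_COHER_CNTL value to flush and/or invalidate caches.
 */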
579 void si_emit_surface_sync(struct si_context *sctx, struct radeon_cmdbuf *cs, unsigned cp_coher_cntl)
580 {
581    bool compute_ib = !sctx->has_graphics;
582 
583    assert(sctx->gfx_level <= GFX9);
584 
585    /* This seems problematic with GFX7 (see #4764) */
586    if (sctx->gfx_level != GFX7)
587       cp_coher_cntl |= 1u << 31; /* don't sync PFP, i.e. execute the sync in ME */
588 
589    radeon_begin(cs);
590 
591    if (sctx->gfx_level == GFX9 || compute_ib) {
592       /* Flush caches and wait for the caches to assert idle. */
593       radeon_emit(PKT3(PKT3_ACQUIRE_MEM, 5, 0));
594       radeon_emit(cp_coher_cntl); /* CP_COHER_CNTL */
595       radeon_emit(0xffffffff);    /* CP_COHER_SIZE */
596       radeon_emit(0xffffff);      /* CP_COHER_SIZE_HI */
597       radeon_emit(0);             /* CP_COHER_BASE */
598       radeon_emit(0);             /* CP_COHER_BASE_HI */
599       radeon_emit(0x0000000A);    /* POLL_INTERVAL */
600    } else {
601       /* ACQUIRE_MEM is only required on a compute ring. */
602       radeon_emit(PKT3(PKT3_SURFACE_SYNC, 3, 0));
603       radeon_emit(cp_coher_cntl); /* CP_COHER_CNTL */
604       radeon_emit(0xffffffff);    /* CP_COHER_SIZE */
605       radeon_emit(0);             /* CP_COHER_BASE */
606       radeon_emit(0x0000000A);    /* POLL_INTERVAL */
607    }
608    radeon_end();
609 
610    /* ACQUIRE_MEM has an implicit context roll if the current context
611     * is busy. */
612    if (!compute_ib)
613       sctx->context_roll = true;
614 }
615 
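/* Return the scratch buffer used for RELEASE_MEM/WAIT_REG_MEM fence waits,
 * creating the TMZ variant on demand for secure IBs.
 */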
616 static struct si_resource *si_get_wait_mem_scratch_bo(struct si_context *ctx,
617                                                       struct radeon_cmdbuf *cs, bool is_secure)
618 {
619    struct si_screen *sscreen = ctx->screen;
620 
621    assert(ctx->gfx_level < GFX11);
622 
623    if (likely(!is_secure)) {
624       return ctx->wait_mem_scratch;
625    } else {
626       assert(sscreen->info.has_tmz_support);
627       if (!ctx->wait_mem_scratch_tmz) {
628          ctx->wait_mem_scratch_tmz =
629             si_aligned_buffer_create(&sscreen->b,
630                                      PIPE_RESOURCE_FLAG_UNMAPPABLE |
631                                      SI_RESOURCE_FLAG_DRIVER_INTERNAL |
632                                      PIPE_RESOURCE_FLAG_ENCRYPTED,
633                                      PIPE_USAGE_DEFAULT, 4,
634                                      sscreen->info.tcc_cache_line_size);
635          si_cp_write_data(ctx, ctx->wait_mem_scratch_tmz, 0, 4, V_370_MEM, V_370_ME,
636                           &ctx->wait_mem_number);
637       }
638 
639       return ctx->wait_mem_scratch_tmz;
640    }
641 }
642 
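/* Cache-flush path for GFX10+, based on GCR_CNTL, RELEASE_MEM and ACQUIRE_MEM. */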
643 void gfx10_emit_cache_flush(struct si_context *ctx, struct radeon_cmdbuf *cs)
644 {
645    uint32_t gcr_cntl = 0;
646    unsigned cb_db_event = 0;
647    unsigned flags = ctx->flags;
648 
649    if (!ctx->has_graphics) {
650       /* Only process compute flags. */
651       flags &= SI_CONTEXT_INV_ICACHE | SI_CONTEXT_INV_SCACHE | SI_CONTEXT_INV_VCACHE |
652                SI_CONTEXT_INV_L2 | SI_CONTEXT_WB_L2 | SI_CONTEXT_INV_L2_METADATA |
653                SI_CONTEXT_CS_PARTIAL_FLUSH;
654    }
655 
656    /* We don't need these. */
657    assert(!(flags & (SI_CONTEXT_VGT_STREAMOUT_SYNC | SI_CONTEXT_FLUSH_AND_INV_DB_META)));
658 
659    radeon_begin(cs);
660 
661    if (flags & SI_CONTEXT_VGT_FLUSH) {
662       radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
663       radeon_emit(EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
664    }
665 
666    if (flags & SI_CONTEXT_FLUSH_AND_INV_CB)
667       ctx->num_cb_cache_flushes++;
668    if (flags & SI_CONTEXT_FLUSH_AND_INV_DB)
669       ctx->num_db_cache_flushes++;
670 
671    if (flags & SI_CONTEXT_INV_ICACHE)
672       gcr_cntl |= S_586_GLI_INV(V_586_GLI_ALL);
673    if (flags & SI_CONTEXT_INV_SCACHE) {
674       /* TODO: When writing to the SMEM L1 cache, we need to set SEQ
675        * to FORWARD when both L1 and L2 are written out (WB or INV).
676        */
677       gcr_cntl |= S_586_GL1_INV(1) | S_586_GLK_INV(1);
678    }
679    if (flags & SI_CONTEXT_INV_VCACHE)
680       gcr_cntl |= S_586_GL1_INV(1) | S_586_GLV_INV(1);
681 
682    /* The L2 cache ops are:
683     * - INV: - invalidate lines that reflect memory (were loaded from memory)
684     *        - don't touch lines that were overwritten (were stored by gfx clients)
685     * - WB: - don't touch lines that reflect memory
686     *       - write back lines that were overwritten
687     * - WB | INV: - invalidate lines that reflect memory
688     *             - write back lines that were overwritten
689     *
690     * GLM doesn't support WB alone. If WB is set, INV must be set too.
691     */
692    if (flags & SI_CONTEXT_INV_L2) {
693       /* Writeback and invalidate everything in L2. */
694       gcr_cntl |= S_586_GL2_INV(1) | S_586_GL2_WB(1) | S_586_GLM_INV(1) | S_586_GLM_WB(1);
695       ctx->num_L2_invalidates++;
696    } else if (flags & SI_CONTEXT_WB_L2) {
697       gcr_cntl |= S_586_GL2_WB(1) | S_586_GLM_WB(1) | S_586_GLM_INV(1);
698    } else if (flags & SI_CONTEXT_INV_L2_METADATA) {
699       gcr_cntl |= S_586_GLM_INV(1) | S_586_GLM_WB(1);
700    }
701 
702    if (flags & (SI_CONTEXT_FLUSH_AND_INV_CB | SI_CONTEXT_FLUSH_AND_INV_DB)) {
703       if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
704          /* Flush CMASK/FMASK/DCC. Will wait for idle later. */
705          radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
706          radeon_emit(EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
707       }
708 
709       /* Gfx11 can't flush DB_META and should use a TS event instead. */
710       if (ctx->gfx_level != GFX11 && flags & SI_CONTEXT_FLUSH_AND_INV_DB) {
711          /* Flush HTILE. Will wait for idle later. */
712          radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
713          radeon_emit(EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
714       }
715 
716       /* First flush CB/DB, then L1/L2. */
717       gcr_cntl |= S_586_SEQ(V_586_SEQ_FORWARD);
718 
719       if ((flags & (SI_CONTEXT_FLUSH_AND_INV_CB | SI_CONTEXT_FLUSH_AND_INV_DB)) ==
720           (SI_CONTEXT_FLUSH_AND_INV_CB | SI_CONTEXT_FLUSH_AND_INV_DB)) {
721          cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
722       } else if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
723          cb_db_event = V_028A90_FLUSH_AND_INV_CB_DATA_TS;
724       } else if (flags & SI_CONTEXT_FLUSH_AND_INV_DB) {
725          if (ctx->gfx_level == GFX11)
726             cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
727          else
728             cb_db_event = V_028A90_FLUSH_AND_INV_DB_DATA_TS;
729       } else {
730          assert(0);
731       }
732    } else {
733       /* Wait for graphics shaders to go idle if requested. */
734       if (flags & SI_CONTEXT_PS_PARTIAL_FLUSH) {
735          radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
736          radeon_emit(EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
737          /* Only count explicit shader flushes, not implicit ones. */
738          ctx->num_vs_flushes++;
739          ctx->num_ps_flushes++;
740       } else if (flags & SI_CONTEXT_VS_PARTIAL_FLUSH) {
741          radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
742          radeon_emit(EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
743          ctx->num_vs_flushes++;
744       }
745    }
746 
747    if (flags & SI_CONTEXT_CS_PARTIAL_FLUSH && ctx->compute_is_busy) {
748       radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
749       radeon_emit(EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
750       ctx->num_cs_flushes++;
751       ctx->compute_is_busy = false;
752    }
753 
754    if (cb_db_event) {
755       if (ctx->gfx_level >= GFX11) {
756          /* Get GCR_CNTL fields, because the encoding is different in RELEASE_MEM. */
757          unsigned glm_wb = G_586_GLM_WB(gcr_cntl);
758          unsigned glm_inv = G_586_GLM_INV(gcr_cntl);
759          unsigned glk_wb = G_586_GLK_WB(gcr_cntl);
760          unsigned glk_inv = G_586_GLK_INV(gcr_cntl);
761          unsigned glv_inv = G_586_GLV_INV(gcr_cntl);
762          unsigned gl1_inv = G_586_GL1_INV(gcr_cntl);
763          assert(G_586_GL2_US(gcr_cntl) == 0);
764          assert(G_586_GL2_RANGE(gcr_cntl) == 0);
765          assert(G_586_GL2_DISCARD(gcr_cntl) == 0);
766          unsigned gl2_inv = G_586_GL2_INV(gcr_cntl);
767          unsigned gl2_wb = G_586_GL2_WB(gcr_cntl);
768          unsigned gcr_seq = G_586_SEQ(gcr_cntl);
769 
770          gcr_cntl &= C_586_GLM_WB & C_586_GLM_INV & C_586_GLK_WB & C_586_GLK_INV &
771                      C_586_GLV_INV & C_586_GL1_INV & C_586_GL2_INV & C_586_GL2_WB; /* keep SEQ */
772 
773          /* Send an event that flushes caches. */
774          radeon_emit(PKT3(PKT3_RELEASE_MEM, 6, 0));
775          radeon_emit(S_490_EVENT_TYPE(cb_db_event) |
776                      S_490_EVENT_INDEX(5) |
777                      S_490_GLM_WB(glm_wb) | S_490_GLM_INV(glm_inv) | S_490_GLV_INV(glv_inv) |
778                      S_490_GL1_INV(gl1_inv) | S_490_GL2_INV(gl2_inv) | S_490_GL2_WB(gl2_wb) |
779                      S_490_SEQ(gcr_seq) | S_490_GLK_WB(glk_wb) | S_490_GLK_INV(glk_inv) |
780                      S_490_PWS_ENABLE(1));
781          radeon_emit(0); /* DST_SEL, INT_SEL, DATA_SEL */
782          radeon_emit(0); /* ADDRESS_LO */
783          radeon_emit(0); /* ADDRESS_HI */
784          radeon_emit(0); /* DATA_LO */
785          radeon_emit(0); /* DATA_HI */
786          radeon_emit(0); /* INT_CTXID */
787 
788          if (unlikely(ctx->thread_trace_enabled)) {
789             radeon_end();
790             si_sqtt_describe_barrier_start(ctx, &ctx->gfx_cs);
791             radeon_begin_again(cs);
792          }
793 
794          /* Wait for the event and invalidate remaining caches if needed. */
795          radeon_emit(PKT3(PKT3_ACQUIRE_MEM, 6, 0));
796          radeon_emit(S_580_PWS_STAGE_SEL(flags & SI_CONTEXT_PFP_SYNC_ME ? V_580_CP_PFP :
797                                                                           V_580_CP_ME) |
798                      S_580_PWS_COUNTER_SEL(V_580_TS_SELECT) |
799                      S_580_PWS_ENA2(1) |
800                      S_580_PWS_COUNT(0));
801          radeon_emit(0xffffffff); /* GCR_SIZE */
802          radeon_emit(0x01ffffff); /* GCR_SIZE_HI */
803          radeon_emit(0); /* GCR_BASE_LO */
804          radeon_emit(0); /* GCR_BASE_HI */
805          radeon_emit(S_585_PWS_ENA(1));
806          radeon_emit(gcr_cntl); /* GCR_CNTL */
807 
808          if (unlikely(ctx->thread_trace_enabled)) {
809             radeon_end();
810             si_sqtt_describe_barrier_end(ctx, &ctx->gfx_cs, flags);
811             radeon_begin_again(cs);
812          }
813 
814          gcr_cntl = 0; /* all done */
815          flags &= ~SI_CONTEXT_PFP_SYNC_ME;
816       } else {
817          /* GFX10 */
818          radeon_end();
819 
820          struct si_resource *wait_mem_scratch =
821            si_get_wait_mem_scratch_bo(ctx, cs, ctx->ws->cs_is_secure(cs));
822 
823          /* CB/DB flush and invalidate via RELEASE_MEM.
824           * Combine this with other cache flushes when possible.
825           */
826          uint64_t va = wait_mem_scratch->gpu_address;
827          ctx->wait_mem_number++;
828 
829          /* Get GCR_CNTL fields, because the encoding is different in RELEASE_MEM. */
830          unsigned glm_wb = G_586_GLM_WB(gcr_cntl);
831          unsigned glm_inv = G_586_GLM_INV(gcr_cntl);
832          unsigned glv_inv = G_586_GLV_INV(gcr_cntl);
833          unsigned gl1_inv = G_586_GL1_INV(gcr_cntl);
834          assert(G_586_GL2_US(gcr_cntl) == 0);
835          assert(G_586_GL2_RANGE(gcr_cntl) == 0);
836          assert(G_586_GL2_DISCARD(gcr_cntl) == 0);
837          unsigned gl2_inv = G_586_GL2_INV(gcr_cntl);
838          unsigned gl2_wb = G_586_GL2_WB(gcr_cntl);
839          unsigned gcr_seq = G_586_SEQ(gcr_cntl);
840 
841          gcr_cntl &= C_586_GLM_WB & C_586_GLM_INV & C_586_GLV_INV & C_586_GL1_INV & C_586_GL2_INV &
842                      C_586_GL2_WB; /* keep SEQ */
843 
844          si_cp_release_mem(ctx, cs, cb_db_event,
845                            S_490_GLM_WB(glm_wb) | S_490_GLM_INV(glm_inv) | S_490_GLV_INV(glv_inv) |
846                            S_490_GL1_INV(gl1_inv) | S_490_GL2_INV(gl2_inv) | S_490_GL2_WB(gl2_wb) |
847                            S_490_SEQ(gcr_seq),
848                            EOP_DST_SEL_MEM, EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM,
849                            EOP_DATA_SEL_VALUE_32BIT, wait_mem_scratch, va, ctx->wait_mem_number,
850                            SI_NOT_QUERY);
851 
852          if (unlikely(ctx->thread_trace_enabled)) {
853             si_sqtt_describe_barrier_start(ctx, &ctx->gfx_cs);
854          }
855 
856          si_cp_wait_mem(ctx, cs, va, ctx->wait_mem_number, 0xffffffff, WAIT_REG_MEM_EQUAL);
857 
858          if (unlikely(ctx->thread_trace_enabled)) {
859             si_sqtt_describe_barrier_end(ctx, &ctx->gfx_cs, flags);
860          }
861 
862          radeon_begin_again(cs);
863       }
864    }
865 
866    /* Ignore fields that only modify the behavior of other fields. */
867    if (gcr_cntl & C_586_GL1_RANGE & C_586_GL2_RANGE & C_586_SEQ) {
868       unsigned dont_sync_pfp = (!(flags & SI_CONTEXT_PFP_SYNC_ME)) << 31;
869 
870       /* Flush caches and wait for the caches to assert idle.
871        * The cache flush is executed in the ME, but the PFP waits
872        * for completion.
873        */
874       radeon_emit(PKT3(PKT3_ACQUIRE_MEM, 6, 0));
875       radeon_emit(dont_sync_pfp); /* CP_COHER_CNTL */
876       radeon_emit(0xffffffff); /* CP_COHER_SIZE */
877       radeon_emit(0xffffff);   /* CP_COHER_SIZE_HI */
878       radeon_emit(0);          /* CP_COHER_BASE */
879       radeon_emit(0);          /* CP_COHER_BASE_HI */
880       radeon_emit(0x0000000A); /* POLL_INTERVAL */
881       radeon_emit(gcr_cntl);   /* GCR_CNTL */
882    } else if (flags & SI_CONTEXT_PFP_SYNC_ME) {
883       /* Synchronize PFP with ME. (this stalls PFP) */
884       radeon_emit(PKT3(PKT3_PFP_SYNC_ME, 0, 0));
885       radeon_emit(0);
886    }
887 
888    if (flags & SI_CONTEXT_START_PIPELINE_STATS && ctx->pipeline_stats_enabled != 1) {
889       radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
890       radeon_emit(EVENT_TYPE(V_028A90_PIPELINESTAT_START) | EVENT_INDEX(0));
891       ctx->pipeline_stats_enabled = 1;
892    } else if (flags & SI_CONTEXT_STOP_PIPELINE_STATS && ctx->pipeline_stats_enabled != 0) {
893       radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
894       radeon_emit(EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) | EVENT_INDEX(0));
895       ctx->pipeline_stats_enabled = 0;
896    }
897    radeon_end();
898 
899    ctx->flags = 0;
900 }
901 
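/* Cache-flush path for GFX6-GFX9, based on CP_COHER_CNTL/SURFACE_SYNC and
 * TS events.
 */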
902 void si_emit_cache_flush(struct si_context *sctx, struct radeon_cmdbuf *cs)
903 {
904    uint32_t flags = sctx->flags;
905 
906    if (!sctx->has_graphics) {
907       /* Only process compute flags. */
908       flags &= SI_CONTEXT_INV_ICACHE | SI_CONTEXT_INV_SCACHE | SI_CONTEXT_INV_VCACHE |
909                SI_CONTEXT_INV_L2 | SI_CONTEXT_WB_L2 | SI_CONTEXT_INV_L2_METADATA |
910                SI_CONTEXT_CS_PARTIAL_FLUSH;
911    }
912 
913    uint32_t cp_coher_cntl = 0;
914    const uint32_t flush_cb_db = flags & (SI_CONTEXT_FLUSH_AND_INV_CB | SI_CONTEXT_FLUSH_AND_INV_DB);
915 
916    assert(sctx->gfx_level <= GFX9);
917 
918    if (flags & SI_CONTEXT_FLUSH_AND_INV_CB)
919       sctx->num_cb_cache_flushes++;
920    if (flags & SI_CONTEXT_FLUSH_AND_INV_DB)
921       sctx->num_db_cache_flushes++;
922 
923    /* GFX6 has a bug that it always flushes ICACHE and KCACHE if either
924     * bit is set. An alternative way is to write SQC_CACHES, but that
925     * doesn't seem to work reliably. Since the bug doesn't affect
926     * correctness (it only does more work than necessary) and
927     * the performance impact is likely negligible, there is no plan
928     * to add a workaround for it.
929     */
930 
931    if (flags & SI_CONTEXT_INV_ICACHE)
932       cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
933    if (flags & SI_CONTEXT_INV_SCACHE)
934       cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);
935 
936    if (sctx->gfx_level <= GFX8) {
937       if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
938          cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) | S_0085F0_CB0_DEST_BASE_ENA(1) |
939                           S_0085F0_CB1_DEST_BASE_ENA(1) | S_0085F0_CB2_DEST_BASE_ENA(1) |
940                           S_0085F0_CB3_DEST_BASE_ENA(1) | S_0085F0_CB4_DEST_BASE_ENA(1) |
941                           S_0085F0_CB5_DEST_BASE_ENA(1) | S_0085F0_CB6_DEST_BASE_ENA(1) |
942                           S_0085F0_CB7_DEST_BASE_ENA(1);
943 
944          /* Necessary for DCC */
945          if (sctx->gfx_level == GFX8)
946             si_cp_release_mem(sctx, cs, V_028A90_FLUSH_AND_INV_CB_DATA_TS, 0, EOP_DST_SEL_MEM,
947                               EOP_INT_SEL_NONE, EOP_DATA_SEL_DISCARD, NULL, 0, 0, SI_NOT_QUERY);
948       }
949       if (flags & SI_CONTEXT_FLUSH_AND_INV_DB)
950          cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) | S_0085F0_DB_DEST_BASE_ENA(1);
951    }
952 
953    radeon_begin(cs);
954 
955    if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
956       /* Flush CMASK/FMASK/DCC. SURFACE_SYNC will wait for idle. */
957       radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
958       radeon_emit(EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
959    }
960    if (flags & (SI_CONTEXT_FLUSH_AND_INV_DB | SI_CONTEXT_FLUSH_AND_INV_DB_META)) {
961       /* Flush HTILE. SURFACE_SYNC will wait for idle. */
962       radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
963       radeon_emit(EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
964    }
965 
966    /* Wait for shader engines to go idle.
967     * VS and PS waits are unnecessary if SURFACE_SYNC is going to wait
968     * for everything including CB/DB cache flushes.
969     */
970    if (!flush_cb_db) {
971       if (flags & SI_CONTEXT_PS_PARTIAL_FLUSH) {
972          radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
973          radeon_emit(EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
974          /* Only count explicit shader flushes, not implicit ones
975           * done by SURFACE_SYNC.
976           */
977          sctx->num_vs_flushes++;
978          sctx->num_ps_flushes++;
979       } else if (flags & SI_CONTEXT_VS_PARTIAL_FLUSH) {
980          radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
981          radeon_emit(EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
982          sctx->num_vs_flushes++;
983       }
984    }
985 
986    if (flags & SI_CONTEXT_CS_PARTIAL_FLUSH && sctx->compute_is_busy) {
987       radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
988       radeon_emit(EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
989       sctx->num_cs_flushes++;
990       sctx->compute_is_busy = false;
991    }
992 
993    /* VGT state synchronization. */
994    if (flags & SI_CONTEXT_VGT_FLUSH) {
995       radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
996       radeon_emit(EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
997    }
998    if (flags & SI_CONTEXT_VGT_STREAMOUT_SYNC) {
999       radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
1000       radeon_emit(EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
1001    }
1002 
1003    radeon_end();
1004 
1005    /* GFX9: Wait for idle if we're flushing CB or DB. ACQUIRE_MEM doesn't
1006     * wait for idle on GFX9. We have to use a TS event.
1007     */
1008    if (sctx->gfx_level == GFX9 && flush_cb_db) {
1009       uint64_t va;
1010       unsigned tc_flags, cb_db_event;
1011 
1012       /* Set the CB/DB flush event. */
1013       switch (flush_cb_db) {
1014       case SI_CONTEXT_FLUSH_AND_INV_CB:
1015          cb_db_event = V_028A90_FLUSH_AND_INV_CB_DATA_TS;
1016          break;
1017       case SI_CONTEXT_FLUSH_AND_INV_DB:
1018          cb_db_event = V_028A90_FLUSH_AND_INV_DB_DATA_TS;
1019          break;
1020       default:
1021          /* both CB & DB */
1022          cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
1023       }
1024 
1025       /* These are the only allowed combinations. If you need to
1026        * do multiple operations at once, do them separately.
1027        * All operations that invalidate L2 also seem to invalidate
1028        * metadata. Volatile (VOL) and WC flushes are not listed here.
1029        *
1030        * TC    | TC_WB         = writeback & invalidate L2
1031        * TC    | TC_WB | TC_NC = writeback & invalidate L2 for MTYPE == NC
1032        *         TC_WB | TC_NC = writeback L2 for MTYPE == NC
1033        * TC            | TC_NC = invalidate L2 for MTYPE == NC
1034        * TC    | TC_MD         = writeback & invalidate L2 metadata (DCC, etc.)
1035        * TCL1                  = invalidate L1
1036        */
1037       tc_flags = 0;
1038 
1039       if (flags & SI_CONTEXT_INV_L2_METADATA) {
1040          tc_flags = EVENT_TC_ACTION_ENA | EVENT_TC_MD_ACTION_ENA;
1041       }
1042 
1043       /* Ideally flush TC together with CB/DB. */
1044       if (flags & SI_CONTEXT_INV_L2) {
1045          /* Writeback and invalidate everything in L2 & L1. */
1046          tc_flags = EVENT_TC_ACTION_ENA | EVENT_TC_WB_ACTION_ENA;
1047 
1048          /* Clear the flags. */
1049          flags &= ~(SI_CONTEXT_INV_L2 | SI_CONTEXT_WB_L2);
1050          sctx->num_L2_invalidates++;
1051       }
1052 
1053       /* Do the flush (enqueue the event and wait for it). */
1054       struct si_resource* wait_mem_scratch =
1055         si_get_wait_mem_scratch_bo(sctx, cs, sctx->ws->cs_is_secure(cs));
1056 
1057       va = wait_mem_scratch->gpu_address;
1058       sctx->wait_mem_number++;
1059 
1060       si_cp_release_mem(sctx, cs, cb_db_event, tc_flags, EOP_DST_SEL_MEM,
1061                         EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM, EOP_DATA_SEL_VALUE_32BIT,
1062                         wait_mem_scratch, va, sctx->wait_mem_number, SI_NOT_QUERY);
1063 
1064       if (unlikely(sctx->thread_trace_enabled)) {
1065          si_sqtt_describe_barrier_start(sctx, &sctx->gfx_cs);
1066       }
1067 
1068       si_cp_wait_mem(sctx, cs, va, sctx->wait_mem_number, 0xffffffff, WAIT_REG_MEM_EQUAL);
1069 
1070       if (unlikely(sctx->thread_trace_enabled)) {
1071          si_sqtt_describe_barrier_end(sctx, &sctx->gfx_cs, sctx->flags);
1072       }
1073    }
1074 
1075    /* GFX6-GFX8 only:
1076     *   When one of the CP_COHER_CNTL.DEST_BASE flags is set, SURFACE_SYNC
1077     *   waits for idle, so it should be last. SURFACE_SYNC is done in PFP.
1078     *
1079     * cp_coher_cntl should contain all necessary flags except TC and PFP flags
1080     * at this point.
1081     *
1082     * GFX6-GFX7 don't support L2 write-back.
1083     */
1084    if (flags & SI_CONTEXT_INV_L2 || (sctx->gfx_level <= GFX7 && (flags & SI_CONTEXT_WB_L2))) {
1085       /* Invalidate L1 & L2. (L1 is always invalidated on GFX6)
1086        * WB must be set on GFX8+ when TC_ACTION is set.
1087        */
1088       si_emit_surface_sync(sctx, cs,
1089                            cp_coher_cntl | S_0085F0_TC_ACTION_ENA(1) | S_0085F0_TCL1_ACTION_ENA(1) |
1090                               S_0301F0_TC_WB_ACTION_ENA(sctx->gfx_level >= GFX8));
1091       cp_coher_cntl = 0;
1092       sctx->num_L2_invalidates++;
1093    } else {
1094       /* L1 invalidation and L2 writeback must be done separately,
1095        * because both operations can't be done together.
1096        */
1097       if (flags & SI_CONTEXT_WB_L2) {
1098          /* WB = write-back
1099           * NC = apply to non-coherent MTYPEs
1100           *      (i.e. MTYPE <= 1, which is what we use everywhere)
1101           *
1102           * WB doesn't work without NC.
1103           */
1104          si_emit_surface_sync(
1105             sctx, cs,
1106             cp_coher_cntl | S_0301F0_TC_WB_ACTION_ENA(1) | S_0301F0_TC_NC_ACTION_ENA(1));
1107          cp_coher_cntl = 0;
1108          sctx->num_L2_writebacks++;
1109       }
1110       if (flags & SI_CONTEXT_INV_VCACHE) {
1111          /* Invalidate per-CU VMEM L1. */
1112          si_emit_surface_sync(sctx, cs, cp_coher_cntl | S_0085F0_TCL1_ACTION_ENA(1));
1113          cp_coher_cntl = 0;
1114       }
1115    }
1116 
1117    /* If TC flushes haven't cleared this... */
1118    if (cp_coher_cntl)
1119       si_emit_surface_sync(sctx, cs, cp_coher_cntl);
1120 
1121    if (flags & SI_CONTEXT_PFP_SYNC_ME) {
1122       radeon_begin(cs);
1123       radeon_emit(PKT3(PKT3_PFP_SYNC_ME, 0, 0));
1124       radeon_emit(0);
1125       radeon_end();
1126    }
1127 
1128    if (flags & SI_CONTEXT_START_PIPELINE_STATS && sctx->pipeline_stats_enabled != 1) {
1129       radeon_begin(cs);
1130       radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
1131       radeon_emit(EVENT_TYPE(V_028A90_PIPELINESTAT_START) | EVENT_INDEX(0));
1132       radeon_end();
1133       sctx->pipeline_stats_enabled = 1;
1134    } else if (flags & SI_CONTEXT_STOP_PIPELINE_STATS && sctx->pipeline_stats_enabled != 0) {
1135       radeon_begin(cs);
1136       radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
1137       radeon_emit(EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) | EVENT_INDEX(0));
1138       radeon_end();
1139       sctx->pipeline_stats_enabled = 0;
1140    }
1141 
1142    sctx->flags = 0;
1143 }
1144