/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * SPDX-License-Identifier: MIT
 */

#include "si_build_pm4.h"
#include "si_pipe.h"
#include "sid.h"
#include "util/os_time.h"
#include "util/u_log.h"
#include "util/u_upload_mgr.h"
#include "ac_debug.h"
#include "si_utrace.h"

void si_flush_gfx_cs(struct si_context *ctx, unsigned flags, struct pipe_fence_handle **fence)
{
   struct radeon_cmdbuf *cs = &ctx->gfx_cs;
   struct radeon_winsys *ws = ctx->ws;
   struct si_screen *sscreen = ctx->screen;
   const unsigned wait_ps_cs = SI_CONTEXT_PS_PARTIAL_FLUSH | SI_CONTEXT_CS_PARTIAL_FLUSH;
   unsigned wait_flags = 0;

   if (ctx->gfx_flush_in_progress)
      return;

   /* The amdgpu kernel driver synchronizes execution for shared DMABUFs between
    * processes on DRM >= 3.39.0, so we don't have to wait at the end of IBs to
    * make sure everything is idle.
    *
    * The amdgpu winsys synchronizes execution for buffers shared by different
    * contexts within the same process.
    *
    * Interop with AMDVLK, RADV, or OpenCL within the same process requires
    * explicit fences or glFinish.
    */
   if (sscreen->info.is_amdgpu && sscreen->info.drm_minor >= 39)
      flags |= RADEON_FLUSH_START_NEXT_GFX_IB_NOW;

   if (ctx->gfx_level == GFX6) {
      /* The kernel flushes L2 before shaders are finished. */
      wait_flags |= wait_ps_cs;
   } else if (!(flags & RADEON_FLUSH_START_NEXT_GFX_IB_NOW) ||
              ((flags & RADEON_FLUSH_TOGGLE_SECURE_SUBMISSION) &&
               !ws->cs_is_secure(cs))) {
      /* TODO: this workaround fixes subtitle rendering with mpv -vo=vaapi and
       * TMZ, but it shouldn't be necessary.
       */
      wait_flags |= wait_ps_cs;
   }

   /* Drop this flush if it's a no-op. */
   if (!radeon_emitted(cs, ctx->initial_gfx_cs_size) &&
       (!wait_flags || !ctx->gfx_last_ib_is_busy) &&
       !(flags & RADEON_FLUSH_TOGGLE_SECURE_SUBMISSION)) {
      tc_driver_internal_flush_notify(ctx->tc);
      return;
   }

   /* Non-aux contexts must set up no-op API dispatch on GPU resets. This is
    * similar to si_get_reset_status but here we can ignore soft-recoveries,
    * while si_get_reset_status can't. */
   if (!(ctx->context_flags & SI_CONTEXT_FLAG_AUX) &&
       ctx->device_reset_callback.reset) {
      enum pipe_reset_status status = ctx->ws->ctx_query_reset_status(ctx->ctx, true, NULL, NULL);
      if (status != PIPE_NO_RESET)
         ctx->device_reset_callback.reset(ctx->device_reset_callback.data, status);
   }

   if (sscreen->debug_flags & DBG(CHECK_VM))
      flags &= ~PIPE_FLUSH_ASYNC;

   ctx->gfx_flush_in_progress = true;

   if (ctx->has_graphics) {
      if (!list_is_empty(&ctx->active_queries))
         si_suspend_queries(ctx);

      ctx->streamout.suspended = false;
      if (ctx->streamout.begin_emitted) {
         si_emit_streamout_end(ctx);
         ctx->streamout.suspended = true;
      }
   }

   /* Make sure CP DMA is idle at the end of IBs after L2 prefetches
    * because the kernel doesn't wait for it. */
   if (ctx->gfx_level >= GFX7)
      si_cp_dma_wait_for_idle(ctx, &ctx->gfx_cs);

   /* If we use s_sendmsg to set tess factors to all 0 or all 1 instead of writing to the tess
    * factor buffer, we need this at the end of command buffers:
    */
   if ((ctx->gfx_level == GFX11 || ctx->gfx_level == GFX11_5) && ctx->tess_rings) {
      radeon_begin(cs);
      radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(EVENT_TYPE(V_028A90_SQ_NON_EVENT) | EVENT_INDEX(0));
      radeon_end();
   }

   /* Wait for draw calls to finish if needed. */
   if (wait_flags) {
      ctx->flags |= wait_flags;
      si_emit_cache_flush_direct(ctx);
   }
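   /* The IB is still considered busy unless both PS and CS were waited on above. */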
   ctx->gfx_last_ib_is_busy = (wait_flags & wait_ps_cs) != wait_ps_cs;

   if (ctx->current_saved_cs) {
      si_trace_emit(ctx);

      /* Save the IB for debug contexts. */
      si_save_cs(ws, cs, &ctx->current_saved_cs->gfx, true);
      ctx->current_saved_cs->flushed = true;
      ctx->current_saved_cs->time_flush = os_time_get_nano();

      si_log_hw_flush(ctx);
   }

   if (sscreen->debug_flags & DBG(IB))
      si_print_current_ib(ctx, stderr);

   if (sscreen->context_roll_log_filename)
      si_gather_context_rolls(ctx);

   if (ctx->is_noop)
      flags |= RADEON_FLUSH_NOOP;

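   /* Record a submission timestamp so this flush shows up in the perfetto trace. */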
   uint64_t start_ts = 0, submission_id = 0;
   if (u_trace_perfetto_active(&ctx->ds.trace_context)) {
      start_ts = si_ds_begin_submit(&ctx->ds_queue);
      submission_id = ctx->ds_queue.submission_id;
   }

   /* Flush the CS. */
   ws->cs_flush(cs, flags, &ctx->last_gfx_fence);

   if (u_trace_perfetto_active(&ctx->ds.trace_context) && start_ts > 0) {
      si_ds_end_submit(&ctx->ds_queue, start_ts);
   }

   tc_driver_internal_flush_notify(ctx->tc);
   if (fence)
      ws->fence_reference(ws, fence, ctx->last_gfx_fence);

   ctx->num_gfx_cs_flushes++;

   /* Check VM faults if needed. */
   if (sscreen->debug_flags & DBG(CHECK_VM)) {
      /* Use a conservative timeout of 800 ms, after which we stop waiting and
       * assume the GPU is hung.
       */
      ctx->ws->fence_wait(ctx->ws, ctx->last_gfx_fence, 800 * 1000 * 1000);

      si_check_vm_faults(ctx, &ctx->current_saved_cs->gfx, AMD_IP_GFX);
   }

   if (unlikely(ctx->sqtt && (flags & PIPE_FLUSH_END_OF_FRAME))) {
      si_handle_sqtt(ctx, &ctx->gfx_cs);
   }

   if (ctx->current_saved_cs)
      si_saved_cs_reference(&ctx->current_saved_cs, NULL);

   if (u_trace_perfetto_active(&ctx->ds.trace_context))
      si_utrace_flush(ctx, submission_id);

   si_begin_new_gfx_cs(ctx, false);
   ctx->gfx_flush_in_progress = false;
}

static void si_begin_gfx_cs_debug(struct si_context *ctx)
{
   static const uint32_t zeros[1];
   assert(!ctx->current_saved_cs);

   ctx->current_saved_cs = calloc(1, sizeof(*ctx->current_saved_cs));
   if (!ctx->current_saved_cs)
      return;

   pipe_reference_init(&ctx->current_saved_cs->reference, 1);

   ctx->current_saved_cs->trace_buf =
      si_resource(pipe_buffer_create(ctx->b.screen, 0, PIPE_USAGE_STAGING, 4));
   if (!ctx->current_saved_cs->trace_buf) {
      free(ctx->current_saved_cs);
      ctx->current_saved_cs = NULL;
      return;
   }

   pipe_buffer_write_nooverlap(&ctx->b, &ctx->current_saved_cs->trace_buf->b.b, 0, sizeof(zeros),
                               zeros);
   ctx->current_saved_cs->trace_id = 0;

   si_trace_emit(ctx);

   radeon_add_to_buffer_list(ctx, &ctx->gfx_cs, ctx->current_saved_cs->trace_buf,
                             RADEON_USAGE_READWRITE | RADEON_PRIO_FENCE_TRACE);
}

static void si_add_gds_to_buffer_list(struct si_context *sctx)
{
   /* No-op: there is nothing to add to the buffer list here. */
}

void si_set_tracked_regs_to_clear_state(struct si_context *ctx)
{
   STATIC_ASSERT(SI_NUM_ALL_TRACKED_REGS <= sizeof(ctx->tracked_regs.reg_saved_mask) * 8);

   ctx->tracked_regs.reg_value[SI_TRACKED_DB_RENDER_CONTROL] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_DB_COUNT_CONTROL] = 0;

   ctx->tracked_regs.reg_value[SI_TRACKED_DB_DEPTH_CONTROL] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_DB_STENCIL_CONTROL] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_DB_DEPTH_BOUNDS_MIN] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_DB_DEPTH_BOUNDS_MAX] = 0;

   ctx->tracked_regs.reg_value[SI_TRACKED_SPI_INTERP_CONTROL_0] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_POINT_SIZE] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_POINT_MINMAX] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_LINE_CNTL] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SC_MODE_CNTL_0] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_SC_MODE_CNTL] = 0x4;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SC_EDGERULE] = 0xaa99aaaa;

   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_POLY_OFFSET_DB_FMT_CNTL] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_POLY_OFFSET_CLAMP] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_POLY_OFFSET_FRONT_SCALE] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_POLY_OFFSET_FRONT_OFFSET] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_POLY_OFFSET_BACK_SCALE] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_POLY_OFFSET_BACK_OFFSET] = 0;

   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SC_LINE_CNTL] = 0x1000;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SC_AA_CONFIG] = 0;

   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_VTX_CNTL] = 0x5;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_GB_VERT_CLIP_ADJ] = 0x3f800000;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_GB_VERT_DISC_ADJ] = 0x3f800000;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_GB_HORZ_CLIP_ADJ] = 0x3f800000;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_GB_HORZ_DISC_ADJ] = 0x3f800000;

   ctx->tracked_regs.reg_value[SI_TRACKED_SPI_SHADER_POS_FORMAT] = 0;

   ctx->tracked_regs.reg_value[SI_TRACKED_SPI_SHADER_Z_FORMAT] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_SPI_SHADER_COL_FORMAT] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_SPI_BARYC_CNTL] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_SPI_PS_INPUT_ENA] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_SPI_PS_INPUT_ADDR] = 0;

   ctx->tracked_regs.reg_value[SI_TRACKED_DB_EQAA] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_DB_SHADER_CONTROL] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_CB_SHADER_MASK] = 0xffffffff;
   ctx->tracked_regs.reg_value[SI_TRACKED_CB_TARGET_MASK] = 0xffffffff;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_CLIP_CNTL] = 0x90000;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_VS_OUT_CNTL] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_VTE_CNTL] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SC_CLIPRECT_RULE] = 0xffff;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SC_LINE_STIPPLE] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SC_MODE_CNTL_1] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_HARDWARE_SCREEN_OFFSET] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_SPI_PS_IN_CONTROL] = 0x2;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_INSTANCE_CNT] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_MAX_VERT_OUT] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_SHADER_STAGES_EN] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_LS_HS_CONFIG] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_TF_PARAM] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SU_SMALL_PRIM_FILTER_CNTL] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_SC_BINNER_CNTL_0] = 0x3;
   ctx->tracked_regs.reg_value[SI_TRACKED_GE_MAX_OUTPUT_PER_SUBGROUP] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_GE_NGG_SUBGRP_CNTL] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_PA_CL_NGG_CNTL] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_DB_PA_SC_VRS_OVERRIDE_CNTL] = 0;

   ctx->tracked_regs.reg_value[SI_TRACKED_SX_PS_DOWNCONVERT] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_SX_BLEND_OPT_EPSILON] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_SX_BLEND_OPT_CONTROL] = 0;

   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_ESGS_RING_ITEMSIZE] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_REUSE_OFF] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_IA_MULTI_VGT_PARAM] = 0xff;

   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_MAX_PRIMS_PER_SUBGROUP] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_ONCHIP_CNTL] = 0;

   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GSVS_RING_ITEMSIZE] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_MODE] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL] = 0x1e;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_OUT_PRIM_TYPE] = 0;

   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GSVS_RING_OFFSET_1] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GSVS_RING_OFFSET_2] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GSVS_RING_OFFSET_3] = 0;

   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_VERT_ITEMSIZE] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_VERT_ITEMSIZE_1] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_VERT_ITEMSIZE_2] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_GS_VERT_ITEMSIZE_3] = 0;

   ctx->tracked_regs.reg_value[SI_TRACKED_DB_RENDER_OVERRIDE2] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_SPI_VS_OUT_CONFIG] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_VGT_PRIMITIVEID_EN] = 0;
   ctx->tracked_regs.reg_value[SI_TRACKED_CB_DCC_CONTROL] = 0;

   /* Set all cleared context registers to saved. */
   BITSET_SET_RANGE(ctx->tracked_regs.reg_saved_mask, 0, SI_NUM_TRACKED_CONTEXT_REGS - 1);
}

void si_install_draw_wrapper(struct si_context *sctx, pipe_draw_func wrapper,
                             pipe_draw_vertex_state_func vstate_wrapper)
{
   if (wrapper) {
      if (wrapper != sctx->b.draw_vbo) {
         assert(!sctx->real_draw_vbo);
         assert(!sctx->real_draw_vertex_state);
         sctx->real_draw_vbo = sctx->b.draw_vbo;
         sctx->real_draw_vertex_state = sctx->b.draw_vertex_state;
         sctx->b.draw_vbo = wrapper;
         sctx->b.draw_vertex_state = vstate_wrapper;
      }
   } else if (sctx->real_draw_vbo) {
      sctx->real_draw_vbo = NULL;
      sctx->real_draw_vertex_state = NULL;
      si_select_draw_vbo(sctx);
   }
}

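/* Flush the current IB and toggle TMZ when the encryption state of the bound resources
 * doesn't match the current submission.
 */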
static void si_tmz_preamble(struct si_context *sctx)
{
   bool secure = si_gfx_resources_check_encrypted(sctx);
   if (secure != sctx->ws->cs_is_secure(&sctx->gfx_cs)) {
      si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW |
                            RADEON_FLUSH_TOGGLE_SECURE_SUBMISSION, NULL);
   }
}

static void si_draw_vbo_tmz_preamble(struct pipe_context *ctx,
                                     const struct pipe_draw_info *info,
                                     unsigned drawid_offset,
                                     const struct pipe_draw_indirect_info *indirect,
                                     const struct pipe_draw_start_count_bias *draws,
                                     unsigned num_draws) {
   struct si_context *sctx = (struct si_context *)ctx;

   si_tmz_preamble(sctx);
   sctx->real_draw_vbo(ctx, info, drawid_offset, indirect, draws, num_draws);
}

static void si_draw_vstate_tmz_preamble(struct pipe_context *ctx,
                                        struct pipe_vertex_state *state,
                                        uint32_t partial_velem_mask,
                                        struct pipe_draw_vertex_state_info info,
                                        const struct pipe_draw_start_count_bias *draws,
                                        unsigned num_draws) {
   struct si_context *sctx = (struct si_context *)ctx;

   si_tmz_preamble(sctx);
   sctx->real_draw_vertex_state(ctx, state, partial_velem_mask, info, draws, num_draws);
}

void si_begin_new_gfx_cs(struct si_context *ctx, bool first_cs)
{
   bool is_secure = false;

   if (!first_cs)
      u_trace_fini(&ctx->trace);

   u_trace_init(&ctx->trace, &ctx->ds.trace_context);

   if (unlikely(radeon_uses_secure_bos(ctx->ws))) {
      is_secure = ctx->ws->cs_is_secure(&ctx->gfx_cs);

      si_install_draw_wrapper(ctx, si_draw_vbo_tmz_preamble,
                              si_draw_vstate_tmz_preamble);
   }

   if (ctx->is_debug)
      si_begin_gfx_cs_debug(ctx);

   if (ctx->screen->gds_oa)
      ctx->ws->cs_add_buffer(&ctx->gfx_cs, ctx->screen->gds_oa, RADEON_USAGE_READWRITE, 0);

   /* Always invalidate caches at the beginning of IBs, because external
    * users (e.g. BO evictions and SDMA/UVD/VCE IBs) can modify our
    * buffers.
    *
    * Gfx10+ automatically invalidates I$, SMEM$, VMEM$, and GL1$ at the beginning of IBs,
    * so we only need to flush the GL2 cache.
    *
    * Note that the cache flush done by the kernel at the end of GFX IBs
    * isn't useful here, because that flush can finish after the following
    * IB starts drawing.
    *
    * TODO: Do we also need to invalidate CB & DB caches?
    */
   ctx->flags |= SI_CONTEXT_INV_L2;
   if (ctx->gfx_level < GFX10)
      ctx->flags |= SI_CONTEXT_INV_ICACHE | SI_CONTEXT_INV_SCACHE | SI_CONTEXT_INV_VCACHE;

   /* Disable pipeline stats if there are no active queries. */
   ctx->flags &= ~SI_CONTEXT_START_PIPELINE_STATS & ~SI_CONTEXT_STOP_PIPELINE_STATS;
   if (ctx->num_hw_pipestat_streamout_queries)
      ctx->flags |= SI_CONTEXT_START_PIPELINE_STATS;
   else
      ctx->flags |= SI_CONTEXT_STOP_PIPELINE_STATS;

   ctx->pipeline_stats_enabled = -1; /* indicate that the current hw state is unknown */

   /* We don't know if the last draw used NGG because it can be a different process.
    * When switching NGG->legacy, we need to flush VGT for certain hw generations.
    */
   if (ctx->screen->info.has_vgt_flush_ngg_legacy_bug && !ctx->ngg)
      ctx->flags |= SI_CONTEXT_VGT_FLUSH;

   si_mark_atom_dirty(ctx, &ctx->atoms.s.cache_flush);

   if (ctx->screen->attribute_ring) {
      radeon_add_to_buffer_list(ctx, &ctx->gfx_cs, ctx->screen->attribute_ring,
                                RADEON_USAGE_READWRITE | RADEON_PRIO_SHADER_RINGS);
   }
   if (ctx->border_color_buffer) {
      radeon_add_to_buffer_list(ctx, &ctx->gfx_cs, ctx->border_color_buffer,
                                RADEON_USAGE_READ | RADEON_PRIO_BORDER_COLORS);
   }
   if (ctx->shadowing.registers) {
      radeon_add_to_buffer_list(ctx, &ctx->gfx_cs, ctx->shadowing.registers,
                                RADEON_USAGE_READWRITE | RADEON_PRIO_DESCRIPTORS);

      if (ctx->shadowing.csa)
         radeon_add_to_buffer_list(ctx, &ctx->gfx_cs, ctx->shadowing.csa,
                                   RADEON_USAGE_READWRITE | RADEON_PRIO_DESCRIPTORS);
   }

   si_add_all_descriptors_to_bo_list(ctx);
   si_shader_pointers_mark_dirty(ctx);
   ctx->cs_shader_state.emitted_program = NULL;

   /* The CS initialization should be emitted before everything else. */
   if (ctx->cs_preamble_state) {
      struct si_pm4_state *preamble = is_secure ? ctx->cs_preamble_state_tmz :
                                                  ctx->cs_preamble_state;
      radeon_begin(&ctx->gfx_cs);
      radeon_emit_array(preamble->pm4, preamble->ndw);
      radeon_end();
   }

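   /* Compute-only contexts stop here; everything below sets up graphics state. */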
   if (!ctx->has_graphics) {
      ctx->initial_gfx_cs_size = ctx->gfx_cs.current.cdw;
      return;
   }

   if (ctx->tess_rings) {
      radeon_add_to_buffer_list(ctx, &ctx->gfx_cs,
                                unlikely(is_secure) ? si_resource(ctx->tess_rings_tmz) : si_resource(ctx->tess_rings),
                                RADEON_USAGE_READWRITE | RADEON_PRIO_SHADER_RINGS);
   }

   /* Mark all valid groups as dirty so they get re-emitted on
    * the next draw command.
    */
   si_pm4_reset_emitted(ctx);

   if (ctx->queued.named.ls)
      ctx->prefetch_L2_mask |= SI_PREFETCH_LS;
   if (ctx->queued.named.hs)
      ctx->prefetch_L2_mask |= SI_PREFETCH_HS;
   if (ctx->queued.named.es)
      ctx->prefetch_L2_mask |= SI_PREFETCH_ES;
   if (ctx->queued.named.gs)
      ctx->prefetch_L2_mask |= SI_PREFETCH_GS;
   if (ctx->queued.named.vs)
      ctx->prefetch_L2_mask |= SI_PREFETCH_VS;
   if (ctx->queued.named.ps)
      ctx->prefetch_L2_mask |= SI_PREFETCH_PS;

   /* CLEAR_STATE disables all colorbuffers, so only enable bound ones. */
   bool has_clear_state = ctx->screen->info.has_clear_state;
   if (has_clear_state) {
      ctx->framebuffer.dirty_cbufs =
         u_bit_consecutive(0, ctx->framebuffer.state.nr_cbufs);
      /* CLEAR_STATE disables the zbuffer, so only enable it if it's bound. */
      ctx->framebuffer.dirty_zsbuf = ctx->framebuffer.state.zsbuf != NULL;
   } else {
      ctx->framebuffer.dirty_cbufs = u_bit_consecutive(0, 8);
      ctx->framebuffer.dirty_zsbuf = true;
   }

   /* RB+ depth-only rendering needs to set CB_COLOR0_INFO differently from CLEAR_STATE. */
   if (ctx->screen->info.rbplus_allowed)
      ctx->framebuffer.dirty_cbufs |= 0x1;

   /* GFX11+ needs to set NUM_SAMPLES differently from CLEAR_STATE. */
   if (ctx->gfx_level >= GFX11)
      ctx->framebuffer.dirty_zsbuf = true;

   /* Even with shadowed registers, we have to add buffers to the buffer list.
    * These atoms are the only ones that add buffers.
    *
    * The framebuffer state also needs to set PA_SC_WINDOW_SCISSOR_BR differently from CLEAR_STATE.
    */
   si_mark_atom_dirty(ctx, &ctx->atoms.s.framebuffer);
   si_mark_atom_dirty(ctx, &ctx->atoms.s.render_cond);
   if (ctx->screen->use_ngg_culling)
      si_mark_atom_dirty(ctx, &ctx->atoms.s.ngg_cull_state);

   if (first_cs || !ctx->shadowing.registers) {
      /* These don't add any buffers, so skip them with shadowing. */
      si_mark_atom_dirty(ctx, &ctx->atoms.s.clip_regs);
      /* CLEAR_STATE sets zeros. */
      if (!has_clear_state || ctx->clip_state_any_nonzeros)
         si_mark_atom_dirty(ctx, &ctx->atoms.s.clip_state);
      ctx->sample_locs_num_samples = 0;
      si_mark_atom_dirty(ctx, &ctx->atoms.s.sample_locations);
      si_mark_atom_dirty(ctx, &ctx->atoms.s.msaa_config);
      /* CLEAR_STATE sets 0xffff. */
      if (!has_clear_state || ctx->sample_mask != 0xffff)
         si_mark_atom_dirty(ctx, &ctx->atoms.s.sample_mask);
      si_mark_atom_dirty(ctx, &ctx->atoms.s.cb_render_state);
      /* CLEAR_STATE sets zeros. */
      if (!has_clear_state || ctx->blend_color_any_nonzeros)
         si_mark_atom_dirty(ctx, &ctx->atoms.s.blend_color);
      si_mark_atom_dirty(ctx, &ctx->atoms.s.db_render_state);
      if (ctx->gfx_level >= GFX9)
         si_mark_atom_dirty(ctx, &ctx->atoms.s.dpbb_state);
      si_mark_atom_dirty(ctx, &ctx->atoms.s.stencil_ref);
      si_mark_atom_dirty(ctx, &ctx->atoms.s.spi_map);
      if (ctx->gfx_level < GFX11)
         si_mark_atom_dirty(ctx, &ctx->atoms.s.streamout_enable);
      /* CLEAR_STATE disables all window rectangles. */
      if (!has_clear_state || ctx->num_window_rectangles > 0)
         si_mark_atom_dirty(ctx, &ctx->atoms.s.window_rectangles);
      si_mark_atom_dirty(ctx, &ctx->atoms.s.guardband);
      si_mark_atom_dirty(ctx, &ctx->atoms.s.scissors);
      si_mark_atom_dirty(ctx, &ctx->atoms.s.viewports);
      si_mark_atom_dirty(ctx, &ctx->atoms.s.vgt_pipeline_state);
      si_mark_atom_dirty(ctx, &ctx->atoms.s.tess_io_layout);

      /* Set all register values to unknown. */
      BITSET_ZERO(ctx->tracked_regs.reg_saved_mask);

      if (has_clear_state)
         si_set_tracked_regs_to_clear_state(ctx);

      /* 0xffffffff is an impossible value for SPI_PS_INPUT_CNTL_n registers */
      memset(ctx->tracked_regs.spi_ps_input_cntl, 0xff, sizeof(uint32_t) * 32);
   }

   /* Invalidate various draw states so that they are emitted before
    * the first draw call. */
   ctx->last_instance_count = SI_INSTANCE_COUNT_UNKNOWN;
   ctx->last_index_size = -1;
   /* Primitive restart is set to false by the gfx preamble on GFX11+. */
   ctx->last_primitive_restart_en = ctx->gfx_level >= GFX11 ? false : -1;
   ctx->last_restart_index = SI_RESTART_INDEX_UNKNOWN;
   ctx->last_prim = -1;
   ctx->last_vs_state = ~0;
   ctx->last_gs_state = ~0;
   ctx->last_ls = NULL;
   ctx->last_tcs = NULL;
   ctx->last_tes_sh_base = -1;
   ctx->last_num_tcs_input_cp = -1;

   assert(ctx->num_buffered_gfx_sh_regs == 0);
   assert(ctx->num_buffered_compute_sh_regs == 0);
   ctx->num_buffered_gfx_sh_regs = 0;
   ctx->num_buffered_compute_sh_regs = 0;

   if (ctx->scratch_buffer)
      si_mark_atom_dirty(ctx, &ctx->atoms.s.scratch_state);

   if (ctx->streamout.suspended) {
      ctx->streamout.append_bitmask = ctx->streamout.enabled_mask;
      si_streamout_buffers_dirty(ctx);
   }

   if (!list_is_empty(&ctx->active_queries))
      si_resume_queries(ctx);

   assert(!ctx->gfx_cs.prev_dw);
   ctx->initial_gfx_cs_size = ctx->gfx_cs.current.cdw;

   /* All buffer references are removed on a flush, so si_check_needs_implicit_sync
    * cannot determine if si_make_CB_shader_coherent() needs to be called.
    * ctx->force_cb_shader_coherent will be cleared by the first call to
    * si_make_CB_shader_coherent.
    */
   ctx->force_cb_shader_coherent = true;
}

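/* Write an incrementing trace ID into the trace buffer and into the IB itself, so a GPU hang
 * can be narrowed down to the last marker that was reached.
 */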
void si_trace_emit(struct si_context *sctx)
{
   struct radeon_cmdbuf *cs = &sctx->gfx_cs;
   uint32_t trace_id = ++sctx->current_saved_cs->trace_id;

   si_cp_write_data(sctx, sctx->current_saved_cs->trace_buf, 0, 4, V_370_MEM, V_370_ME, &trace_id);

   radeon_begin(cs);
   radeon_emit(PKT3(PKT3_NOP, 0, 0));
   radeon_emit(AC_ENCODE_TRACE_POINT(trace_id));
   radeon_end();

   if (sctx->log)
      u_log_flush(sctx->log);
}

/* timestamp logging for u_trace: */
void si_emit_ts(struct si_context *sctx, struct si_resource *buffer, unsigned int offset)
{
   struct radeon_cmdbuf *cs = &sctx->gfx_cs;
   uint64_t va = buffer->gpu_address + offset;
   si_cp_release_mem(sctx, cs, V_028A90_BOTTOM_OF_PIPE_TS, 0, EOP_DST_SEL_MEM, EOP_INT_SEL_NONE,
                     EOP_DATA_SEL_TIMESTAMP, buffer, va, 0, PIPE_QUERY_TIMESTAMP);
}

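/* Emit SURFACE_SYNC (or ACQUIRE_MEM) with the given CP_COHER_CNTL flags. GFX6-GFX9 only. */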
void si_emit_surface_sync(struct si_context *sctx, struct radeon_cmdbuf *cs, unsigned cp_coher_cntl)
{
   bool compute_ib = !sctx->has_graphics;

   assert(sctx->gfx_level <= GFX9);

   /* This seems problematic with GFX7 (see #4764) */
   if (sctx->gfx_level != GFX7)
      cp_coher_cntl |= 1u << 31; /* don't sync PFP, i.e. execute the sync in ME */

   radeon_begin(cs);

   if (sctx->gfx_level == GFX9 || compute_ib) {
      /* Flush caches and wait for the caches to assert idle. */
      radeon_emit(PKT3(PKT3_ACQUIRE_MEM, 5, 0));
      radeon_emit(cp_coher_cntl); /* CP_COHER_CNTL */
      radeon_emit(0xffffffff);    /* CP_COHER_SIZE */
      radeon_emit(0xffffff);      /* CP_COHER_SIZE_HI */
      radeon_emit(0);             /* CP_COHER_BASE */
      radeon_emit(0);             /* CP_COHER_BASE_HI */
      radeon_emit(0x0000000A);    /* POLL_INTERVAL */
   } else {
      /* ACQUIRE_MEM is only required on a compute ring, so SURFACE_SYNC is sufficient here. */
      radeon_emit(PKT3(PKT3_SURFACE_SYNC, 3, 0));
      radeon_emit(cp_coher_cntl); /* CP_COHER_CNTL */
      radeon_emit(0xffffffff);    /* CP_COHER_SIZE */
      radeon_emit(0);             /* CP_COHER_BASE */
      radeon_emit(0x0000000A);    /* POLL_INTERVAL */
   }
   radeon_end();

   /* ACQUIRE_MEM has an implicit context roll if the current context
    * is busy. */
   if (!compute_ib)
      sctx->context_roll = true;
}

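/* Return the 4-byte scratch BO used for EOP fence waits. A TMZ copy is allocated lazily
 * for secure submissions.
 */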
static struct si_resource *si_get_wait_mem_scratch_bo(struct si_context *ctx,
                                                      struct radeon_cmdbuf *cs, bool is_secure)
{
   struct si_screen *sscreen = ctx->screen;

   assert(ctx->gfx_level < GFX11);

   if (likely(!is_secure)) {
      return ctx->wait_mem_scratch;
   } else {
      assert(sscreen->info.has_tmz_support);
      if (!ctx->wait_mem_scratch_tmz) {
         ctx->wait_mem_scratch_tmz =
            si_aligned_buffer_create(&sscreen->b,
                                     PIPE_RESOURCE_FLAG_UNMAPPABLE |
                                     SI_RESOURCE_FLAG_DRIVER_INTERNAL |
                                     PIPE_RESOURCE_FLAG_ENCRYPTED,
                                     PIPE_USAGE_DEFAULT, 4,
                                     sscreen->info.tcc_cache_line_size);
         si_cp_write_data(ctx, ctx->wait_mem_scratch_tmz, 0, 4, V_370_MEM, V_370_ME,
                          &ctx->wait_mem_number);
      }

      return ctx->wait_mem_scratch_tmz;
   }
}

void gfx10_emit_cache_flush(struct si_context *ctx, struct radeon_cmdbuf *cs)
{
   uint32_t gcr_cntl = 0;
   unsigned cb_db_event = 0;
   unsigned flags = ctx->flags;

   if (!flags)
      return;

   if (!ctx->has_graphics) {
      /* Only process compute flags. */
      flags &= SI_CONTEXT_INV_ICACHE | SI_CONTEXT_INV_SCACHE | SI_CONTEXT_INV_VCACHE |
               SI_CONTEXT_INV_L2 | SI_CONTEXT_WB_L2 | SI_CONTEXT_INV_L2_METADATA |
               SI_CONTEXT_CS_PARTIAL_FLUSH;
   }

   /* We don't need these. */
   assert(!(flags & (SI_CONTEXT_VGT_STREAMOUT_SYNC | SI_CONTEXT_FLUSH_AND_INV_DB_META)));

   radeon_begin(cs);

   if (flags & SI_CONTEXT_VGT_FLUSH) {
      radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
   }

   if (flags & SI_CONTEXT_FLUSH_AND_INV_CB)
      ctx->num_cb_cache_flushes++;
   if (flags & SI_CONTEXT_FLUSH_AND_INV_DB)
      ctx->num_db_cache_flushes++;

   if (flags & SI_CONTEXT_INV_ICACHE)
      gcr_cntl |= S_586_GLI_INV(V_586_GLI_ALL);
   if (flags & SI_CONTEXT_INV_SCACHE) {
      /* TODO: When writing to the SMEM L1 cache, we need to set SEQ
       * to FORWARD when both L1 and L2 are written out (WB or INV).
       */
      gcr_cntl |= S_586_GL1_INV(1) | S_586_GLK_INV(1);
   }
   if (flags & SI_CONTEXT_INV_VCACHE)
      gcr_cntl |= S_586_GL1_INV(1) | S_586_GLV_INV(1);

   /* The L2 cache ops are:
    * - INV: - invalidate lines that reflect memory (were loaded from memory)
    *        - don't touch lines that were overwritten (were stored by gfx clients)
    * - WB: - don't touch lines that reflect memory
    *       - write back lines that were overwritten
    * - WB | INV: - invalidate lines that reflect memory
    *             - write back lines that were overwritten
    *
    * GLM doesn't support WB alone. If WB is set, INV must be set too.
    */
   if (flags & SI_CONTEXT_INV_L2) {
      /* Writeback and invalidate everything in L2. */
      gcr_cntl |= S_586_GL2_INV(1) | S_586_GL2_WB(1) | S_586_GLM_INV(1) | S_586_GLM_WB(1);
      ctx->num_L2_invalidates++;
   } else if (flags & SI_CONTEXT_WB_L2) {
      gcr_cntl |= S_586_GL2_WB(1) | S_586_GLM_WB(1) | S_586_GLM_INV(1);
   } else if (flags & SI_CONTEXT_INV_L2_METADATA) {
      gcr_cntl |= S_586_GLM_INV(1) | S_586_GLM_WB(1);
   }

   if (flags & (SI_CONTEXT_FLUSH_AND_INV_CB | SI_CONTEXT_FLUSH_AND_INV_DB)) {
      if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
         /* Flush CMASK/FMASK/DCC. Will wait for idle later. */
         radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
         radeon_emit(EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
      }

      /* Gfx11 can't flush DB_META and should use a TS event instead. */
      if (ctx->gfx_level != GFX11 && flags & SI_CONTEXT_FLUSH_AND_INV_DB) {
         /* Flush HTILE. Will wait for idle later. */
         radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
         radeon_emit(EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
      }

      /* First flush CB/DB, then L1/L2. */
      gcr_cntl |= S_586_SEQ(V_586_SEQ_FORWARD);

      if ((flags & (SI_CONTEXT_FLUSH_AND_INV_CB | SI_CONTEXT_FLUSH_AND_INV_DB)) ==
          (SI_CONTEXT_FLUSH_AND_INV_CB | SI_CONTEXT_FLUSH_AND_INV_DB)) {
         cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
      } else if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
         cb_db_event = V_028A90_FLUSH_AND_INV_CB_DATA_TS;
      } else if (flags & SI_CONTEXT_FLUSH_AND_INV_DB) {
         if (ctx->gfx_level == GFX11)
            cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
         else
            cb_db_event = V_028A90_FLUSH_AND_INV_DB_DATA_TS;
      } else {
         assert(0);
      }
   } else {
      /* Wait for graphics shaders to go idle if requested. */
      if (flags & SI_CONTEXT_PS_PARTIAL_FLUSH) {
         radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
         radeon_emit(EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
         /* Only count explicit shader flushes, not implicit ones. */
         ctx->num_vs_flushes++;
         ctx->num_ps_flushes++;
      } else if (flags & SI_CONTEXT_VS_PARTIAL_FLUSH) {
         radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
         radeon_emit(EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
         ctx->num_vs_flushes++;
      }
   }

   if (flags & SI_CONTEXT_CS_PARTIAL_FLUSH && ctx->compute_is_busy) {
      radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
      ctx->num_cs_flushes++;
      ctx->compute_is_busy = false;
   }

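   /* Flush CB/DB caches: GFX11+ uses RELEASE_MEM plus a PWS-based ACQUIRE_MEM, while GFX10
    * writes a fence value to a scratch BO with RELEASE_MEM and waits for it with WAIT_REG_MEM.
    */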
   if (cb_db_event) {
      if (ctx->gfx_level >= GFX11) {
         /* Get GCR_CNTL fields, because the encoding is different in RELEASE_MEM. */
         unsigned glm_wb = G_586_GLM_WB(gcr_cntl);
         unsigned glm_inv = G_586_GLM_INV(gcr_cntl);
         unsigned glk_wb = G_586_GLK_WB(gcr_cntl);
         unsigned glk_inv = G_586_GLK_INV(gcr_cntl);
         unsigned glv_inv = G_586_GLV_INV(gcr_cntl);
         unsigned gl1_inv = G_586_GL1_INV(gcr_cntl);
         assert(G_586_GL2_US(gcr_cntl) == 0);
         assert(G_586_GL2_RANGE(gcr_cntl) == 0);
         assert(G_586_GL2_DISCARD(gcr_cntl) == 0);
         unsigned gl2_inv = G_586_GL2_INV(gcr_cntl);
         unsigned gl2_wb = G_586_GL2_WB(gcr_cntl);
         unsigned gcr_seq = G_586_SEQ(gcr_cntl);

         gcr_cntl &= C_586_GLM_WB & C_586_GLM_INV & C_586_GLK_WB & C_586_GLK_INV &
                     C_586_GLV_INV & C_586_GL1_INV & C_586_GL2_INV & C_586_GL2_WB; /* keep SEQ */

         /* Send an event that flushes caches. */
         radeon_emit(PKT3(PKT3_RELEASE_MEM, 6, 0));
         radeon_emit(S_490_EVENT_TYPE(cb_db_event) |
                     S_490_EVENT_INDEX(5) |
                     S_490_GLM_WB(glm_wb) | S_490_GLM_INV(glm_inv) | S_490_GLV_INV(glv_inv) |
                     S_490_GL1_INV(gl1_inv) | S_490_GL2_INV(gl2_inv) | S_490_GL2_WB(gl2_wb) |
                     S_490_SEQ(gcr_seq) | S_490_GLK_WB(glk_wb) | S_490_GLK_INV(glk_inv) |
                     S_490_PWS_ENABLE(1));
         radeon_emit(0); /* DST_SEL, INT_SEL, DATA_SEL */
         radeon_emit(0); /* ADDRESS_LO */
         radeon_emit(0); /* ADDRESS_HI */
         radeon_emit(0); /* DATA_LO */
         radeon_emit(0); /* DATA_HI */
         radeon_emit(0); /* INT_CTXID */

         if (unlikely(ctx->sqtt_enabled)) {
            radeon_end();
            si_sqtt_describe_barrier_start(ctx, &ctx->gfx_cs);
            radeon_begin_again(cs);
         }

         /* Wait for the event and invalidate remaining caches if needed. */
         radeon_emit(PKT3(PKT3_ACQUIRE_MEM, 6, 0));
         radeon_emit(S_580_PWS_STAGE_SEL(flags & SI_CONTEXT_PFP_SYNC_ME ? V_580_CP_PFP :
                                                                          V_580_CP_ME) |
                     S_580_PWS_COUNTER_SEL(V_580_TS_SELECT) |
                     S_580_PWS_ENA2(1) |
                     S_580_PWS_COUNT(0));
         radeon_emit(0xffffffff); /* GCR_SIZE */
         radeon_emit(0x01ffffff); /* GCR_SIZE_HI */
         radeon_emit(0);          /* GCR_BASE_LO */
         radeon_emit(0);          /* GCR_BASE_HI */
         radeon_emit(S_585_PWS_ENA(1));
         radeon_emit(gcr_cntl);   /* GCR_CNTL */

         if (unlikely(ctx->sqtt_enabled)) {
            radeon_end();
            si_sqtt_describe_barrier_end(ctx, &ctx->gfx_cs, flags);
            radeon_begin_again(cs);
         }

         gcr_cntl = 0; /* all done */
         flags &= ~SI_CONTEXT_PFP_SYNC_ME;
      } else {
         /* GFX10 */
         radeon_end();

         struct si_resource *wait_mem_scratch =
            si_get_wait_mem_scratch_bo(ctx, cs, ctx->ws->cs_is_secure(cs));

         /* CB/DB flush and invalidate via RELEASE_MEM.
          * Combine this with other cache flushes when possible.
          */
         uint64_t va = wait_mem_scratch->gpu_address;
         ctx->wait_mem_number++;

         /* Get GCR_CNTL fields, because the encoding is different in RELEASE_MEM. */
         unsigned glm_wb = G_586_GLM_WB(gcr_cntl);
         unsigned glm_inv = G_586_GLM_INV(gcr_cntl);
         unsigned glv_inv = G_586_GLV_INV(gcr_cntl);
         unsigned gl1_inv = G_586_GL1_INV(gcr_cntl);
         assert(G_586_GL2_US(gcr_cntl) == 0);
         assert(G_586_GL2_RANGE(gcr_cntl) == 0);
         assert(G_586_GL2_DISCARD(gcr_cntl) == 0);
         unsigned gl2_inv = G_586_GL2_INV(gcr_cntl);
         unsigned gl2_wb = G_586_GL2_WB(gcr_cntl);
         unsigned gcr_seq = G_586_SEQ(gcr_cntl);

         gcr_cntl &= C_586_GLM_WB & C_586_GLM_INV & C_586_GLV_INV & C_586_GL1_INV & C_586_GL2_INV &
                     C_586_GL2_WB; /* keep SEQ */

         si_cp_release_mem(ctx, cs, cb_db_event,
                           S_490_GLM_WB(glm_wb) | S_490_GLM_INV(glm_inv) | S_490_GLV_INV(glv_inv) |
                              S_490_GL1_INV(gl1_inv) | S_490_GL2_INV(gl2_inv) | S_490_GL2_WB(gl2_wb) |
                              S_490_SEQ(gcr_seq),
                           EOP_DST_SEL_MEM, EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM,
                           EOP_DATA_SEL_VALUE_32BIT, wait_mem_scratch, va, ctx->wait_mem_number,
                           SI_NOT_QUERY);

         if (unlikely(ctx->sqtt_enabled)) {
            si_sqtt_describe_barrier_start(ctx, &ctx->gfx_cs);
         }

         si_cp_wait_mem(ctx, cs, va, ctx->wait_mem_number, 0xffffffff, WAIT_REG_MEM_EQUAL);

         if (unlikely(ctx->sqtt_enabled)) {
            si_sqtt_describe_barrier_end(ctx, &ctx->gfx_cs, flags);
         }

         radeon_begin_again(cs);
      }
   }

   /* Ignore fields that only modify the behavior of other fields. */
   if (gcr_cntl & C_586_GL1_RANGE & C_586_GL2_RANGE & C_586_SEQ) {
      unsigned dont_sync_pfp = (!(flags & SI_CONTEXT_PFP_SYNC_ME)) << 31;

      /* Flush caches and wait for the caches to assert idle.
       * The cache flush is executed in the ME, but the PFP waits
       * for completion.
       */
      radeon_emit(PKT3(PKT3_ACQUIRE_MEM, 6, 0));
      radeon_emit(dont_sync_pfp); /* CP_COHER_CNTL */
      radeon_emit(0xffffffff);    /* CP_COHER_SIZE */
      radeon_emit(0xffffff);      /* CP_COHER_SIZE_HI */
      radeon_emit(0);             /* CP_COHER_BASE */
      radeon_emit(0);             /* CP_COHER_BASE_HI */
      radeon_emit(0x0000000A);    /* POLL_INTERVAL */
      radeon_emit(gcr_cntl);      /* GCR_CNTL */
   } else if (flags & SI_CONTEXT_PFP_SYNC_ME) {
      /* Synchronize PFP with ME. (this stalls PFP) */
      radeon_emit(PKT3(PKT3_PFP_SYNC_ME, 0, 0));
      radeon_emit(0);
   }

   if (flags & SI_CONTEXT_START_PIPELINE_STATS && ctx->pipeline_stats_enabled != 1) {
      radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(EVENT_TYPE(V_028A90_PIPELINESTAT_START) | EVENT_INDEX(0));
      ctx->pipeline_stats_enabled = 1;
   } else if (flags & SI_CONTEXT_STOP_PIPELINE_STATS && ctx->pipeline_stats_enabled != 0) {
      radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) | EVENT_INDEX(0));
      ctx->pipeline_stats_enabled = 0;
   }
   radeon_end();

   ctx->flags = 0;
}

void gfx6_emit_cache_flush(struct si_context *sctx, struct radeon_cmdbuf *cs)
{
   uint32_t flags = sctx->flags;

   if (!flags)
      return;

   if (!sctx->has_graphics) {
      /* Only process compute flags. */
      flags &= SI_CONTEXT_INV_ICACHE | SI_CONTEXT_INV_SCACHE | SI_CONTEXT_INV_VCACHE |
               SI_CONTEXT_INV_L2 | SI_CONTEXT_WB_L2 | SI_CONTEXT_INV_L2_METADATA |
               SI_CONTEXT_CS_PARTIAL_FLUSH;
   }

   uint32_t cp_coher_cntl = 0;
   const uint32_t flush_cb_db = flags & (SI_CONTEXT_FLUSH_AND_INV_CB | SI_CONTEXT_FLUSH_AND_INV_DB);

   assert(sctx->gfx_level <= GFX9);

   if (flags & SI_CONTEXT_FLUSH_AND_INV_CB)
      sctx->num_cb_cache_flushes++;
   if (flags & SI_CONTEXT_FLUSH_AND_INV_DB)
      sctx->num_db_cache_flushes++;

   /* GFX6 has a bug that it always flushes ICACHE and KCACHE if either
    * bit is set. An alternative way is to write SQC_CACHES, but that
    * doesn't seem to work reliably. Since the bug doesn't affect
    * correctness (it only does more work than necessary) and
    * the performance impact is likely negligible, there is no plan
    * to add a workaround for it.
    */

   if (flags & SI_CONTEXT_INV_ICACHE)
      cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
   if (flags & SI_CONTEXT_INV_SCACHE)
      cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);

   if (sctx->gfx_level <= GFX8) {
      if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
         cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) | S_0085F0_CB0_DEST_BASE_ENA(1) |
                          S_0085F0_CB1_DEST_BASE_ENA(1) | S_0085F0_CB2_DEST_BASE_ENA(1) |
                          S_0085F0_CB3_DEST_BASE_ENA(1) | S_0085F0_CB4_DEST_BASE_ENA(1) |
                          S_0085F0_CB5_DEST_BASE_ENA(1) | S_0085F0_CB6_DEST_BASE_ENA(1) |
                          S_0085F0_CB7_DEST_BASE_ENA(1);

         /* Necessary for DCC */
         if (sctx->gfx_level == GFX8)
            si_cp_release_mem(sctx, cs, V_028A90_FLUSH_AND_INV_CB_DATA_TS, 0, EOP_DST_SEL_MEM,
                              EOP_INT_SEL_NONE, EOP_DATA_SEL_DISCARD, NULL, 0, 0, SI_NOT_QUERY);
      }
      if (flags & SI_CONTEXT_FLUSH_AND_INV_DB)
         cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) | S_0085F0_DB_DEST_BASE_ENA(1);
   }

   radeon_begin(cs);

   if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
      /* Flush CMASK/FMASK/DCC. SURFACE_SYNC will wait for idle. */
      radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
   }
   if (flags & (SI_CONTEXT_FLUSH_AND_INV_DB | SI_CONTEXT_FLUSH_AND_INV_DB_META)) {
      /* Flush HTILE. SURFACE_SYNC will wait for idle. */
      radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
   }

   /* Wait for shader engines to go idle.
    * VS and PS waits are unnecessary if SURFACE_SYNC is going to wait
    * for everything including CB/DB cache flushes.
    */
   if (!flush_cb_db) {
      if (flags & SI_CONTEXT_PS_PARTIAL_FLUSH) {
         radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
         radeon_emit(EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
         /* Only count explicit shader flushes, not implicit ones
          * done by SURFACE_SYNC.
          */
         sctx->num_vs_flushes++;
         sctx->num_ps_flushes++;
      } else if (flags & SI_CONTEXT_VS_PARTIAL_FLUSH) {
         radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
         radeon_emit(EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
         sctx->num_vs_flushes++;
      }
   }

   if (flags & SI_CONTEXT_CS_PARTIAL_FLUSH && sctx->compute_is_busy) {
      radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
      sctx->num_cs_flushes++;
      sctx->compute_is_busy = false;
   }

   /* VGT state synchronization. */
   if (flags & SI_CONTEXT_VGT_FLUSH) {
      radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
   }
   if (flags & SI_CONTEXT_VGT_STREAMOUT_SYNC) {
      radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
   }

   radeon_end();

   /* GFX9: Wait for idle if we're flushing CB or DB. ACQUIRE_MEM doesn't
    * wait for idle on GFX9. We have to use a TS event.
    */
   if (sctx->gfx_level == GFX9 && flush_cb_db) {
      uint64_t va;
      unsigned tc_flags, cb_db_event;

      /* Set the CB/DB flush event. */
      switch (flush_cb_db) {
      case SI_CONTEXT_FLUSH_AND_INV_CB:
         cb_db_event = V_028A90_FLUSH_AND_INV_CB_DATA_TS;
         break;
      case SI_CONTEXT_FLUSH_AND_INV_DB:
         cb_db_event = V_028A90_FLUSH_AND_INV_DB_DATA_TS;
         break;
      default:
         /* both CB & DB */
         cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
      }

      /* These are the only allowed combinations. If you need to
       * do multiple operations at once, do them separately.
       * All operations that invalidate L2 also seem to invalidate
       * metadata. Volatile (VOL) and WC flushes are not listed here.
       *
       * TC | TC_WB         = writeback & invalidate L2
       * TC | TC_WB | TC_NC = writeback & invalidate L2 for MTYPE == NC
       *      TC_WB | TC_NC = writeback L2 for MTYPE == NC
       * TC | TC_NC         = invalidate L2 for MTYPE == NC
       * TC | TC_MD         = writeback & invalidate L2 metadata (DCC, etc.)
       * TCL1               = invalidate L1
       */
      tc_flags = 0;

      if (flags & SI_CONTEXT_INV_L2_METADATA) {
         tc_flags = EVENT_TC_ACTION_ENA | EVENT_TC_MD_ACTION_ENA;
      }

      /* Ideally flush TC together with CB/DB. */
      if (flags & SI_CONTEXT_INV_L2) {
         /* Writeback and invalidate everything in L2 & L1. */
         tc_flags = EVENT_TC_ACTION_ENA | EVENT_TC_WB_ACTION_ENA;

         /* Clear the flags. */
         flags &= ~(SI_CONTEXT_INV_L2 | SI_CONTEXT_WB_L2);
         sctx->num_L2_invalidates++;
      }

      /* Do the flush (enqueue the event and wait for it). */
      struct si_resource *wait_mem_scratch =
         si_get_wait_mem_scratch_bo(sctx, cs, sctx->ws->cs_is_secure(cs));

      va = wait_mem_scratch->gpu_address;
      sctx->wait_mem_number++;

      si_cp_release_mem(sctx, cs, cb_db_event, tc_flags, EOP_DST_SEL_MEM,
                        EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM, EOP_DATA_SEL_VALUE_32BIT,
                        wait_mem_scratch, va, sctx->wait_mem_number, SI_NOT_QUERY);

      if (unlikely(sctx->sqtt_enabled)) {
         si_sqtt_describe_barrier_start(sctx, &sctx->gfx_cs);
      }

      si_cp_wait_mem(sctx, cs, va, sctx->wait_mem_number, 0xffffffff, WAIT_REG_MEM_EQUAL);

      if (unlikely(sctx->sqtt_enabled)) {
         si_sqtt_describe_barrier_end(sctx, &sctx->gfx_cs, sctx->flags);
      }
   }

   /* GFX6-GFX8 only:
    * When one of the CP_COHER_CNTL.DEST_BASE flags is set, SURFACE_SYNC
    * waits for idle, so it should be last. SURFACE_SYNC is done in PFP.
    *
    * cp_coher_cntl should contain all necessary flags except TC and PFP flags
    * at this point.
    *
    * GFX6-GFX7 don't support L2 write-back.
    */
   if (flags & SI_CONTEXT_INV_L2 || (sctx->gfx_level <= GFX7 && (flags & SI_CONTEXT_WB_L2))) {
      /* Invalidate L1 & L2. (L1 is always invalidated on GFX6)
       * WB must be set on GFX8+ when TC_ACTION is set.
       */
      si_emit_surface_sync(sctx, cs,
                           cp_coher_cntl | S_0085F0_TC_ACTION_ENA(1) | S_0085F0_TCL1_ACTION_ENA(1) |
                              S_0301F0_TC_WB_ACTION_ENA(sctx->gfx_level >= GFX8));
      cp_coher_cntl = 0;
      sctx->num_L2_invalidates++;
   } else {
      /* L1 invalidation and L2 writeback must be done separately,
       * because both operations can't be done together.
       */
      if (flags & SI_CONTEXT_WB_L2) {
         /* WB = write-back
          * NC = apply to non-coherent MTYPEs
          *      (i.e. MTYPE <= 1, which is what we use everywhere)
          *
          * WB doesn't work without NC.
          */
         si_emit_surface_sync(
            sctx, cs,
            cp_coher_cntl | S_0301F0_TC_WB_ACTION_ENA(1) | S_0301F0_TC_NC_ACTION_ENA(1));
         cp_coher_cntl = 0;
         sctx->num_L2_writebacks++;
      }
      if (flags & SI_CONTEXT_INV_VCACHE) {
         /* Invalidate per-CU VMEM L1. */
         si_emit_surface_sync(sctx, cs, cp_coher_cntl | S_0085F0_TCL1_ACTION_ENA(1));
         cp_coher_cntl = 0;
      }
   }

   /* If TC flushes haven't cleared this... */
   if (cp_coher_cntl)
      si_emit_surface_sync(sctx, cs, cp_coher_cntl);

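   /* Stall the PFP until the ME has processed all packets above. */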
   if (flags & SI_CONTEXT_PFP_SYNC_ME) {
      radeon_begin(cs);
      radeon_emit(PKT3(PKT3_PFP_SYNC_ME, 0, 0));
      radeon_emit(0);
      radeon_end();
   }

   if (flags & SI_CONTEXT_START_PIPELINE_STATS && sctx->pipeline_stats_enabled != 1) {
      radeon_begin(cs);
      radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(EVENT_TYPE(V_028A90_PIPELINESTAT_START) | EVENT_INDEX(0));
      radeon_end();
      sctx->pipeline_stats_enabled = 1;
   } else if (flags & SI_CONTEXT_STOP_PIPELINE_STATS && sctx->pipeline_stats_enabled != 0) {
      radeon_begin(cs);
      radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
      radeon_emit(EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) | EVENT_INDEX(0));
      radeon_end();
      sctx->pipeline_stats_enabled = 0;
   }

   sctx->flags = 0;
}