/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * SPDX-License-Identifier: MIT
 */

#include "si_build_pm4.h"
#include "util/u_memory.h"
#include "util/u_suballoc.h"

static void si_set_streamout_enable(struct si_context *sctx, bool enable);

static inline void si_so_target_reference(struct si_streamout_target **dst,
                                          struct pipe_stream_output_target *src)
{
   pipe_so_target_reference((struct pipe_stream_output_target **)dst, src);
}

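/* Create a streamout target. The filled-size storage is allocated lazily at
 * bind time (in si_set_streamout_targets), so this only wraps the buffer and
 * extends its valid range.
 */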
static struct pipe_stream_output_target *si_create_so_target(struct pipe_context *ctx,
                                                             struct pipe_resource *buffer,
                                                             unsigned buffer_offset,
                                                             unsigned buffer_size)
{
   struct si_streamout_target *t;
   struct si_resource *buf = si_resource(buffer);

   t = CALLOC_STRUCT(si_streamout_target);
   if (!t) {
      return NULL;
   }

   t->b.reference.count = 1;
   t->b.context = ctx;
   pipe_resource_reference(&t->b.buffer, buffer);
   t->b.buffer_offset = buffer_offset;
   t->b.buffer_size = buffer_size;

   util_range_add(&buf->b.b, &buf->valid_buffer_range, buffer_offset, buffer_offset + buffer_size);
   return &t->b;
}

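/* Drop the references held by a streamout target and free it. */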
static void si_so_target_destroy(struct pipe_context *ctx, struct pipe_stream_output_target *target)
{
   struct si_streamout_target *t = (struct si_streamout_target *)target;
   pipe_resource_reference(&t->b.buffer, NULL);
   si_resource_reference(&t->buf_filled_size, NULL);
   FREE(t);
}

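/* Mark the streamout begin atom dirty and enable streamout, if any targets
 * are bound.
 */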
void si_streamout_buffers_dirty(struct si_context *sctx)
{
   if (!sctx->streamout.enabled_mask)
      return;

   si_mark_atom_dirty(sctx, &sctx->atoms.s.streamout_begin);
   si_set_streamout_enable(sctx, true);
}

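/* Bind or unbind streamout targets. This unbinds the previous targets
 * (emitting streamout end and the required cache flushes), allocates the
 * filled-size storage, and binds the new buffers both to the hw streamout
 * state and as shader resources.
 */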
static void si_set_streamout_targets(struct pipe_context *ctx, unsigned num_targets,
                                     struct pipe_stream_output_target **targets,
                                     const unsigned *offsets,
                                     enum mesa_prim output_prim)
{
   struct si_context *sctx = (struct si_context *)ctx;
   unsigned old_num_targets = sctx->streamout.num_targets;
   unsigned i;

   if (!old_num_targets && !num_targets)
      return;

   if (sctx->gfx_level >= GFX12)
      si_set_internal_shader_buffer(sctx, SI_STREAMOUT_STATE_BUF, NULL);

   /* We are going to unbind the buffers. Mark which caches need to be flushed. */
   if (old_num_targets && sctx->streamout.begin_emitted) {
      /* Stop streamout. */
      si_emit_streamout_end(sctx);

      /* Since streamout uses vector writes which go through L2
       * and most other clients can use L2 as well, we don't need
       * to flush it.
       *
       * The only cases which require flushing it are VGT DMA index
       * fetching (on <= GFX7) and indirect draw data, which are rare
       * cases. Thus, flag the L2 dirtiness in the resource and
       * handle it at draw call time.
       */
      for (i = 0; i < old_num_targets; i++)
         if (sctx->streamout.targets[i])
            si_resource(sctx->streamout.targets[i]->b.buffer)->L2_cache_dirty = true;

      /* Invalidate the scalar cache in case a streamout buffer is
       * going to be used as a constant buffer.
       *
       * Invalidate vL1, because streamout bypasses it (done by
       * setting GLC=1 in the store instruction), but vL1 in other
       * CUs can contain outdated data of streamout buffers.
       *
       * VS_PARTIAL_FLUSH is required if the buffers are going to be
       * used as an input immediately.
       */
      sctx->barrier_flags |= SI_BARRIER_INV_SMEM | SI_BARRIER_INV_VMEM |
                             SI_BARRIER_SYNC_VS | SI_BARRIER_PFP_SYNC_ME;

      /* Make the streamout state buffer available to the CP for resuming and DrawTF. */
      if (sctx->screen->info.cp_sdma_ge_use_system_memory_scope)
         sctx->barrier_flags |= SI_BARRIER_WB_L2;

      si_mark_atom_dirty(sctx, &sctx->atoms.s.barrier);
   }

   /* TODO: This is a hack that fixes these failures. It shouldn't be necessary.
    * spec@ext_transform_feedback@immediate-reuse
    * spec@ext_transform_feedback@immediate-reuse-index-buffer
    * spec@ext_transform_feedback@immediate-reuse-uniform-buffer
    */
   if (sctx->gfx_level >= GFX11 && sctx->gfx_level < GFX12 && old_num_targets)
      si_flush_gfx_cs(sctx, 0, NULL);

   /* Streamout buffers must be bound in 2 places:
    * 1) in VGT by setting the VGT_STRMOUT registers
    * 2) as shader resources
    */
   unsigned enabled_mask = 0, append_bitmask = 0;

   for (i = 0; i < num_targets; i++) {
      si_so_target_reference(&sctx->streamout.targets[i], targets[i]);

      if (!targets[i]) {
         si_set_internal_shader_buffer(sctx, SI_VS_STREAMOUT_BUF0 + i, NULL);
         continue;
      }

      enabled_mask |= 1 << i;

      if (offsets[i] == ((unsigned)-1))
         append_bitmask |= 1 << i;

      /* Allocate space for the filled buffer size. */
      struct si_streamout_target *t = sctx->streamout.targets[i];

      if (sctx->gfx_level >= GFX12) {
         bool first_target = util_bitcount(enabled_mask) == 1;

         /* The first enabled streamout target allocates the ordered ID/offset buffer for all
          * targets. The other targets only hold the reference to the buffer because they need
          * it for glDrawTransformFeedbackStream if stream != 0.
          */
         if (first_target) {
            /* If not appending, we need to reset the buffer. */
            if (!append_bitmask) {
               /* The layout is:
                *    struct {
                *       struct {
                *          uint32_t ordered_id;     // equal for all buffers
                *          uint32_t dwords_written; // it's actually in bytes
                *       } buffer[4];
                *    };
                *
                * The buffer must be initialized to 0 and the address must be aligned to 64
                * because it's faster when the atomic doesn't straddle a 64B block boundary.
                */
               unsigned alloc_size = 32;
               unsigned alignment = 64;

               si_resource_reference(&t->buf_filled_size, NULL);
               u_suballocator_alloc(&sctx->allocator_zeroed_memory, alloc_size, alignment,
                                    &t->buf_filled_size_offset,
                                    (struct pipe_resource **)&t->buf_filled_size);
            }

            /* Bind the buffer to the shader for global_atomic_ordered_add_b64. */
            struct pipe_shader_buffer sbuf;
            sbuf.buffer = &t->buf_filled_size->b.b;
            sbuf.buffer_offset = t->buf_filled_size_offset;
            sbuf.buffer_size = 32; /* unused, the shader only uses the low 32 bits of the address */

            si_set_internal_shader_buffer(sctx, SI_STREAMOUT_STATE_BUF, &sbuf);
         } else {
            /* All other streamout targets use the same buffer as the first one. */
            struct si_streamout_target *first = sctx->streamout.targets[ffs(enabled_mask) - 1];

            assert(first != t);
            assert(first->buf_filled_size);
            si_resource_reference(&t->buf_filled_size, first->buf_filled_size);
            t->buf_filled_size_offset = first->buf_filled_size_offset;
         }

         /* Offset to dwords_written of the streamout buffer. */
         t->buf_filled_size_draw_count_offset = t->buf_filled_size_offset + i * 8 + 4;
      } else {
         /* GFX6-11 */
         if (!t->buf_filled_size) {
            unsigned alloc_size = sctx->gfx_level >= GFX11 ? 8 : 4;

            u_suballocator_alloc(&sctx->allocator_zeroed_memory, alloc_size, 4,
                                 &t->buf_filled_size_offset,
                                 (struct pipe_resource **)&t->buf_filled_size);
            t->buf_filled_size_draw_count_offset = t->buf_filled_size_offset;
         }
      }

      /* Bind it to the shader. */
      struct pipe_shader_buffer sbuf;
      sbuf.buffer = targets[i]->buffer;

      if (sctx->gfx_level >= GFX11) {
         sbuf.buffer_offset = targets[i]->buffer_offset;
         sbuf.buffer_size = targets[i]->buffer_size;
      } else {
         sbuf.buffer_offset = 0;
         sbuf.buffer_size = targets[i]->buffer_offset + targets[i]->buffer_size;
      }

      si_set_internal_shader_buffer(sctx, SI_VS_STREAMOUT_BUF0 + i, &sbuf);
      si_resource(targets[i]->buffer)->bind_history |= SI_BIND_STREAMOUT_BUFFER;
   }
   for (; i < old_num_targets; i++) {
      si_so_target_reference(&sctx->streamout.targets[i], NULL);
      si_set_internal_shader_buffer(sctx, SI_VS_STREAMOUT_BUF0 + i, NULL);
   }

   /* Either streamout is being resumed for all targets or none. Required by how we implement it
    * for GFX12.
    */
   assert(!append_bitmask || enabled_mask == append_bitmask);

   if (!!sctx->streamout.enabled_mask != !!enabled_mask)
      sctx->do_update_shaders = true; /* to keep/remove streamout shader code as an optimization */

   sctx->streamout.output_prim = output_prim;
   sctx->streamout.num_verts_per_prim = output_prim == MESA_PRIM_UNKNOWN ?
                                           0 : mesa_vertices_per_prim(output_prim);
   sctx->streamout.num_targets = num_targets;
   sctx->streamout.enabled_mask = enabled_mask;
   sctx->streamout.append_bitmask = append_bitmask;

   /* Update dirty state bits. */
   if (num_targets) {
      si_streamout_buffers_dirty(sctx);

      /* All readers of the streamout targets need to be finished before we can
       * start writing to them.
       */
      sctx->barrier_flags |= SI_BARRIER_SYNC_PS | SI_BARRIER_SYNC_CS |
                             SI_BARRIER_PFP_SYNC_ME;
      si_mark_atom_dirty(sctx, &sctx->atoms.s.barrier);
   } else {
      si_set_atom_dirty(sctx, &sctx->atoms.s.streamout_begin, false);
      si_set_streamout_enable(sctx, false);
   }
}

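/* Write CP_STRMOUT_CNTL, flush VGT streamout, and wait until the CP sets
 * OFFSET_UPDATE_DONE, i.e. all buffer offsets have been latched. Only used
 * by the legacy (pre-GFX11) streamout path.
 */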
static void si_flush_vgt_streamout(struct si_context *sctx)
{
   struct radeon_cmdbuf *cs = &sctx->gfx_cs;
   unsigned reg_strmout_cntl;

   radeon_begin(cs);

   /* The register is at different places on different ASICs. */
   if (sctx->gfx_level >= GFX9) {
      reg_strmout_cntl = R_0300FC_CP_STRMOUT_CNTL;
      radeon_emit(PKT3(PKT3_WRITE_DATA, 3, 0));
      radeon_emit(S_370_DST_SEL(V_370_MEM_MAPPED_REGISTER) | S_370_ENGINE_SEL(V_370_ME));
      radeon_emit(R_0300FC_CP_STRMOUT_CNTL >> 2);
      radeon_emit(0);
      radeon_emit(0);
   } else if (sctx->gfx_level >= GFX7) {
      reg_strmout_cntl = R_0300FC_CP_STRMOUT_CNTL;
      radeon_set_uconfig_reg(reg_strmout_cntl, 0);
   } else {
      reg_strmout_cntl = R_0084FC_CP_STRMOUT_CNTL;
      radeon_set_config_reg(reg_strmout_cntl, 0);
   }

   radeon_event_write(V_028A90_SO_VGTSTREAMOUT_FLUSH);

   radeon_emit(PKT3(PKT3_WAIT_REG_MEM, 5, 0));
   radeon_emit(WAIT_REG_MEM_EQUAL);    /* wait until the register is equal to the reference value */
   radeon_emit(reg_strmout_cntl >> 2); /* register */
   radeon_emit(0);
   radeon_emit(S_0084FC_OFFSET_UPDATE_DONE(1)); /* reference value */
   radeon_emit(S_0084FC_OFFSET_UPDATE_DONE(1)); /* mask */
   radeon_emit(4);                              /* poll interval */
   radeon_end();
}

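/* Emit the streamout begin state: per-buffer strides and either a restored
 * dwords_written value (when appending) or a zeroed one (when starting at the
 * beginning). Each gfx level stores dwords_written differently: GFX12 uses
 * the ordered ID/offset buffer, GFX11 uses the GDS_STRMOUT registers, and
 * older chips use the VGT_STRMOUT registers.
 */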
static void si_emit_streamout_begin(struct si_context *sctx, unsigned index)
{
   struct radeon_cmdbuf *cs = &sctx->gfx_cs;
   struct si_streamout_target **t = sctx->streamout.targets;
   bool first_target = true;

   if (sctx->gfx_level < GFX11)
      si_flush_vgt_streamout(sctx);

   for (unsigned i = 0; i < sctx->streamout.num_targets; i++) {
      if (!t[i])
         continue;

      t[i]->stride = sctx->streamout.stride_in_dw[i] * 4;

      if (sctx->gfx_level >= GFX12) {
         /* Only the first streamout target holds information. */
         if (first_target) {
            if (sctx->streamout.append_bitmask & (1 << i)) {
               si_cp_copy_data(sctx, cs, COPY_DATA_REG, NULL,
                               R_0309B0_GE_GS_ORDERED_ID_BASE >> 2, COPY_DATA_SRC_MEM,
                               t[i]->buf_filled_size, t[i]->buf_filled_size_offset);
            } else {
               radeon_begin(cs);
               radeon_set_uconfig_reg(R_0309B0_GE_GS_ORDERED_ID_BASE, 0);
               radeon_end();
            }

            first_target = false;
         }
      } else if (sctx->gfx_level >= GFX11) {
         if (sctx->streamout.append_bitmask & (1 << i)) {
            /* Restore the register value. */
            si_cp_copy_data(sctx, cs, COPY_DATA_REG, NULL,
                            (R_031088_GDS_STRMOUT_DWORDS_WRITTEN_0 / 4) + i,
                            COPY_DATA_SRC_MEM, t[i]->buf_filled_size,
                            t[i]->buf_filled_size_offset);
         } else {
            /* Set to 0. */
            radeon_begin(cs);
            radeon_set_uconfig_reg(R_031088_GDS_STRMOUT_DWORDS_WRITTEN_0 + i * 4, 0);
            radeon_end();
         }
      } else {
         /* Legacy streamout.
          *
          * The hw binds streamout buffers as shader resources. VGT only counts primitives
          * and tells the shader through SGPRs what to do.
          */
         radeon_begin(cs);
         radeon_set_context_reg_seq(R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16 * i, 2);
         radeon_emit((t[i]->b.buffer_offset + t[i]->b.buffer_size) >> 2); /* BUFFER_SIZE (in DW) */
         radeon_emit(sctx->streamout.stride_in_dw[i]);                    /* VTX_STRIDE (in DW) */

         if (sctx->streamout.append_bitmask & (1 << i) && t[i]->buf_filled_size_valid) {
            uint64_t va = t[i]->buf_filled_size->gpu_address + t[i]->buf_filled_size_offset;

            /* Append. */
            radeon_emit(PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
            radeon_emit(STRMOUT_SELECT_BUFFER(i) | STRMOUT_DATA_TYPE(1) | /* offset in bytes */
                        STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_MEM));  /* control */
            radeon_emit(0);                                               /* unused */
            radeon_emit(0);                                               /* unused */
            radeon_emit(va);                                              /* src address lo */
            radeon_emit(va >> 32);                                        /* src address hi */

            radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, t[i]->buf_filled_size,
                                      RADEON_USAGE_READ | RADEON_PRIO_SO_FILLED_SIZE);
         } else {
            /* Start from the beginning. */
            radeon_emit(PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
            radeon_emit(STRMOUT_SELECT_BUFFER(i) | STRMOUT_DATA_TYPE(1) |   /* offset in bytes */
                        STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_PACKET)); /* control */
            radeon_emit(0);                                                 /* unused */
            radeon_emit(0);                                                 /* unused */
            radeon_emit(t[i]->b.buffer_offset >> 2);                        /* buffer offset in DW */
            radeon_emit(0);                                                 /* unused */
         }
         radeon_end_update_context_roll();
      }
   }

   sctx->streamout.begin_emitted = true;
}

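/* Stop streamout. On GFX11 and older, save dwords_written to buf_filled_size
 * so that a later bind can resume at the same offset and DrawTransformFeedback
 * can read the vertex count from memory. GFX12 keeps this state in the
 * streamout state buffer already.
 */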
void si_emit_streamout_end(struct si_context *sctx)
{
   struct radeon_cmdbuf *cs = &sctx->gfx_cs;
   struct si_streamout_target **t = sctx->streamout.targets;

   if (sctx->gfx_level >= GFX12) {
      /* Nothing to do. The streamout state buffer already contains the next ordered ID, which
       * is the only thing we need to restore.
       */
      sctx->streamout.begin_emitted = false;
      return;
   }

   if (sctx->gfx_level >= GFX11) {
      /* Wait for streamout to finish before reading GDS_STRMOUT registers. */
      sctx->barrier_flags |= SI_BARRIER_SYNC_VS;
      si_emit_barrier_direct(sctx);
   } else {
      si_flush_vgt_streamout(sctx);
   }

   for (unsigned i = 0; i < sctx->streamout.num_targets; i++) {
      if (!t[i])
         continue;

      if (sctx->gfx_level >= GFX11) {
         si_cp_copy_data(sctx, &sctx->gfx_cs, COPY_DATA_DST_MEM,
                         t[i]->buf_filled_size, t[i]->buf_filled_size_offset,
                         COPY_DATA_REG, NULL,
                         (R_031088_GDS_STRMOUT_DWORDS_WRITTEN_0 >> 2) + i);
         /* For DrawTF reading buf_filled_size: */
         sctx->barrier_flags |= SI_BARRIER_PFP_SYNC_ME;
         si_mark_atom_dirty(sctx, &sctx->atoms.s.barrier);
      } else {
         uint64_t va = t[i]->buf_filled_size->gpu_address + t[i]->buf_filled_size_offset;

         radeon_begin(cs);
         radeon_emit(PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
         radeon_emit(STRMOUT_SELECT_BUFFER(i) | STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_NONE) |
                     STRMOUT_DATA_TYPE(1) | STRMOUT_STORE_BUFFER_FILLED_SIZE); /* control */
         radeon_emit(va);       /* dst address lo */
         radeon_emit(va >> 32); /* dst address hi */
         radeon_emit(0);        /* unused */
         radeon_emit(0);        /* unused */

         /* Zero the buffer size. The counters (primitives generated,
          * primitives emitted) may be enabled even if no buffer is
          * bound. This ensures that the primitives-emitted query
          * won't increment. */
         radeon_set_context_reg(R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16 * i, 0);
         radeon_end_update_context_roll();

         radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, t[i]->buf_filled_size,
                                   RADEON_USAGE_WRITE | RADEON_PRIO_SO_FILLED_SIZE);
      }

      t[i]->buf_filled_size_valid = true;
   }

   sctx->streamout.begin_emitted = false;
}

/* STREAMOUT CONFIG DERIVED STATE
 *
 * Streamout must be enabled for the PRIMITIVES_GENERATED query to work.
 * The buffer mask is an independent state, so no writes occur if there
 * are no buffers bound.
 */

static void si_emit_streamout_enable(struct si_context *sctx, unsigned index)
{
   assert(sctx->gfx_level < GFX11);

   radeon_begin(&sctx->gfx_cs);
   radeon_set_context_reg_seq(R_028B94_VGT_STRMOUT_CONFIG, 2);
   radeon_emit(S_028B94_STREAMOUT_0_EN(si_get_strmout_en(sctx)) |
               S_028B94_RAST_STREAM(0) |
               S_028B94_STREAMOUT_1_EN(si_get_strmout_en(sctx)) |
               S_028B94_STREAMOUT_2_EN(si_get_strmout_en(sctx)) |
               S_028B94_STREAMOUT_3_EN(si_get_strmout_en(sctx)));
   radeon_emit(sctx->streamout.hw_enabled_mask & sctx->streamout.enabled_stream_buffers_mask);
   radeon_end();
}

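/* Update the cached streamout enable state and the hw enable mask, and mark
 * the streamout_enable atom dirty if the effective hw state changed.
 */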
static void si_set_streamout_enable(struct si_context *sctx, bool enable)
{
   if (sctx->gfx_level >= GFX11)
      return;

   bool old_strmout_en = si_get_strmout_en(sctx);
   unsigned old_hw_enabled_mask = sctx->streamout.hw_enabled_mask;

   sctx->streamout.streamout_enabled = enable;

   sctx->streamout.hw_enabled_mask =
      sctx->streamout.enabled_mask | (sctx->streamout.enabled_mask << 4) |
      (sctx->streamout.enabled_mask << 8) | (sctx->streamout.enabled_mask << 12);

   if ((old_strmout_en != si_get_strmout_en(sctx)) ||
       (old_hw_enabled_mask != sctx->streamout.hw_enabled_mask))
      si_mark_atom_dirty(sctx, &sctx->atoms.s.streamout_enable);
}

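/* Track the number of active PRIMITIVES_GENERATED queries. On GFX10 and
 * older, the query only works while streamout is enabled, so this may toggle
 * the streamout enable state and trigger an NGG/shader update.
 */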
void si_update_prims_generated_query_state(struct si_context *sctx, unsigned type, int diff)
{
   if (sctx->gfx_level < GFX11 && type == PIPE_QUERY_PRIMITIVES_GENERATED) {
      bool old_strmout_en = si_get_strmout_en(sctx);

      sctx->streamout.num_prims_gen_queries += diff;
      assert(sctx->streamout.num_prims_gen_queries >= 0);

      sctx->streamout.prims_gen_query_enabled = sctx->streamout.num_prims_gen_queries != 0;

      if (old_strmout_en != si_get_strmout_en(sctx))
         si_mark_atom_dirty(sctx, &sctx->atoms.s.streamout_enable);

      if (si_update_ngg(sctx)) {
         si_shader_change_notify(sctx);
         sctx->do_update_shaders = true;
      }
   }
}

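/* Plug the streamout entrypoints into the pipe context and register the
 * state atoms.
 */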
void si_init_streamout_functions(struct si_context *sctx)
{
   sctx->b.create_stream_output_target = si_create_so_target;
   sctx->b.stream_output_target_destroy = si_so_target_destroy;
   sctx->b.set_stream_output_targets = si_set_streamout_targets;
   sctx->atoms.s.streamout_begin.emit = si_emit_streamout_begin;

   if (sctx->gfx_level < GFX11)
      sctx->atoms.s.streamout_enable.emit = si_emit_streamout_enable;
}