/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * SPDX-License-Identifier: MIT
 */

#include "si_build_pm4.h"
#include "util/u_memory.h"
#include "util/u_suballoc.h"

static void si_set_streamout_enable(struct si_context *sctx, bool enable);

static inline void si_so_target_reference(struct si_streamout_target **dst,
                                          struct pipe_stream_output_target *src)
{
   pipe_so_target_reference((struct pipe_stream_output_target **)dst, src);
}

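/* Create a streamout target wrapping "buffer". The bound range is added to
 * the resource's valid range because streamout may write all of it. The
 * filled-size buffer is allocated lazily when the target is first bound.
 */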
static struct pipe_stream_output_target *si_create_so_target(struct pipe_context *ctx,
                                                             struct pipe_resource *buffer,
                                                             unsigned buffer_offset,
                                                             unsigned buffer_size)
{
   struct si_streamout_target *t;
   struct si_resource *buf = si_resource(buffer);

   t = CALLOC_STRUCT(si_streamout_target);
   if (!t) {
      return NULL;
   }

   t->b.reference.count = 1;
   t->b.context = ctx;
   pipe_resource_reference(&t->b.buffer, buffer);
   t->b.buffer_offset = buffer_offset;
   t->b.buffer_size = buffer_size;

   util_range_add(&buf->b.b, &buf->valid_buffer_range, buffer_offset, buffer_offset + buffer_size);
   return &t->b;
}

static void si_so_target_destroy(struct pipe_context *ctx, struct pipe_stream_output_target *target)
{
   struct si_streamout_target *t = (struct si_streamout_target *)target;
   pipe_resource_reference(&t->b.buffer, NULL);
   si_resource_reference(&t->buf_filled_size, NULL);
   FREE(t);
}

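/* Re-emit the streamout-begin state and keep streamout enabled while any
 * targets are bound.
 */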
void si_streamout_buffers_dirty(struct si_context *sctx)
{
   if (!sctx->streamout.enabled_mask)
      return;

   si_mark_atom_dirty(sctx, &sctx->atoms.s.streamout_begin);
   si_set_streamout_enable(sctx, true);
}

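/* Bind a new set of streamout targets: stop the previous streamout session
 * and flag the cache flushes it requires, then bind the new buffers both in
 * VGT registers and as internal shader buffers.
 */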
static void si_set_streamout_targets(struct pipe_context *ctx, unsigned num_targets,
                                     struct pipe_stream_output_target **targets,
                                     const unsigned *offsets)
{
   struct si_context *sctx = (struct si_context *)ctx;
   unsigned old_num_targets = sctx->streamout.num_targets;
   unsigned i;

   if (!old_num_targets && !num_targets)
      return;

   /* We are going to unbind the buffers. Mark which caches need to be flushed. */
   if (old_num_targets && sctx->streamout.begin_emitted) {
      /* Stop streamout. */
      si_emit_streamout_end(sctx);

      /* Since streamout uses vector writes which go through TC L2
       * and most other clients can use TC L2 as well, we don't need
       * to flush it.
       *
       * The only cases that require flushing it are VGT DMA index
       * fetching (on <= GFX7) and indirect draw data, which are rare.
       * Thus, flag the TC L2 dirtiness in the resource and handle it
       * at draw call time.
       */
      for (i = 0; i < old_num_targets; i++)
         if (sctx->streamout.targets[i])
            si_resource(sctx->streamout.targets[i]->b.buffer)->TC_L2_dirty = true;

      /* Invalidate the scalar cache in case a streamout buffer is
       * going to be used as a constant buffer.
       *
       * Invalidate vL1, because streamout bypasses it (done by
       * setting GLC=1 in the store instruction), but vL1 in other
       * CUs can contain outdated data of streamout buffers.
       *
       * VS_PARTIAL_FLUSH is required if the buffers are going to be
       * used as an input immediately.
       */
      sctx->flags |= SI_CONTEXT_INV_SCACHE | SI_CONTEXT_INV_VCACHE |
                     SI_CONTEXT_VS_PARTIAL_FLUSH | SI_CONTEXT_PFP_SYNC_ME;
      si_mark_atom_dirty(sctx, &sctx->atoms.s.cache_flush);
   }

   /* TODO: This is a hack that fixes these failures. It shouldn't be necessary.
    * spec@ext_transform_feedback@immediate-reuse
    * spec@ext_transform_feedback@immediate-reuse-index-buffer
    * spec@ext_transform_feedback@immediate-reuse-uniform-buffer
    */
   if (sctx->gfx_level >= GFX11 && old_num_targets)
      si_flush_gfx_cs(sctx, 0, NULL);

   /* Streamout buffers must be bound in 2 places:
    * 1) in VGT by setting the VGT_STRMOUT registers
    * 2) as shader resources
    */
   unsigned enabled_mask = 0, append_bitmask = 0;

   for (i = 0; i < num_targets; i++) {
      si_so_target_reference(&sctx->streamout.targets[i], targets[i]);

      if (!targets[i]) {
         si_set_internal_shader_buffer(sctx, SI_VS_STREAMOUT_BUF0 + i, NULL);
         continue;
      }

      enabled_mask |= 1 << i;

      if (offsets[i] == ((unsigned)-1))
         append_bitmask |= 1 << i;

      /* Allocate space for the filled buffer size. */
      struct si_streamout_target *t = sctx->streamout.targets[i];
      if (!t->buf_filled_size) {
         unsigned buf_filled_size_size = sctx->gfx_level >= GFX11 ? 8 : 4;
         u_suballocator_alloc(&sctx->allocator_zeroed_memory, buf_filled_size_size, 4,
                              &t->buf_filled_size_offset,
                              (struct pipe_resource **)&t->buf_filled_size);
      }

      /* Bind it to the shader. */
      struct pipe_shader_buffer sbuf;
      sbuf.buffer = targets[i]->buffer;

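      /* GFX11 doesn't use VGT streamout state, so bind exactly the requested
       * range. Older chips get the write offset from VGT state (see
       * si_emit_streamout_begin), which is relative to the buffer start, so
       * the binding must start at 0 and cover buffer_offset + buffer_size.
       */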
      if (sctx->gfx_level >= GFX11) {
         sbuf.buffer_offset = targets[i]->buffer_offset;
         sbuf.buffer_size = targets[i]->buffer_size;
      } else {
         sbuf.buffer_offset = 0;
         sbuf.buffer_size = targets[i]->buffer_offset + targets[i]->buffer_size;
      }

      si_set_internal_shader_buffer(sctx, SI_VS_STREAMOUT_BUF0 + i, &sbuf);
      si_resource(targets[i]->buffer)->bind_history |= SI_BIND_STREAMOUT_BUFFER;
   }
   for (; i < old_num_targets; i++) {
      si_so_target_reference(&sctx->streamout.targets[i], NULL);
      si_set_internal_shader_buffer(sctx, SI_VS_STREAMOUT_BUF0 + i, NULL);
   }

158
159 if (!!sctx->streamout.enabled_mask != !!enabled_mask)
160 sctx->do_update_shaders = true; /* to keep/remove streamout shader code as an optimization */
161
162 sctx->streamout.num_targets = num_targets;
163 sctx->streamout.enabled_mask = enabled_mask;
164 sctx->streamout.append_bitmask = append_bitmask;
165
166 /* Update dirty state bits. */
167 if (num_targets) {
168 si_streamout_buffers_dirty(sctx);
169
170 /* All readers of the streamout targets need to be finished before we can
171 * start writing to them.
172 */
173 sctx->flags |= SI_CONTEXT_PS_PARTIAL_FLUSH | SI_CONTEXT_CS_PARTIAL_FLUSH |
174 SI_CONTEXT_PFP_SYNC_ME;
175 si_mark_atom_dirty(sctx, &sctx->atoms.s.cache_flush);
176 } else {
177 si_set_atom_dirty(sctx, &sctx->atoms.s.streamout_begin, false);
178 si_set_streamout_enable(sctx, false);
179 }
180 }
181
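/* Make VGT flush its streamout state: write 0 to CP_STRMOUT_CNTL, trigger a
 * SO_VGTSTREAMOUT_FLUSH event, then wait until the CP sets
 * OFFSET_UPDATE_DONE in that register.
 */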
static void si_flush_vgt_streamout(struct si_context *sctx)
{
   struct radeon_cmdbuf *cs = &sctx->gfx_cs;
   unsigned reg_strmout_cntl;

   radeon_begin(cs);

   /* The register is at different places on different ASICs. */
   if (sctx->gfx_level >= GFX9) {
      reg_strmout_cntl = R_0300FC_CP_STRMOUT_CNTL;
      radeon_emit(PKT3(PKT3_WRITE_DATA, 3, 0));
      radeon_emit(S_370_DST_SEL(V_370_MEM_MAPPED_REGISTER) | S_370_ENGINE_SEL(V_370_ME));
      radeon_emit(R_0300FC_CP_STRMOUT_CNTL >> 2);
      radeon_emit(0);
      radeon_emit(0);
   } else if (sctx->gfx_level >= GFX7) {
      reg_strmout_cntl = R_0300FC_CP_STRMOUT_CNTL;
      radeon_set_uconfig_reg(reg_strmout_cntl, 0);
   } else {
      reg_strmout_cntl = R_0084FC_CP_STRMOUT_CNTL;
      radeon_set_config_reg(reg_strmout_cntl, 0);
   }

   radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
   radeon_emit(EVENT_TYPE(V_028A90_SO_VGTSTREAMOUT_FLUSH) | EVENT_INDEX(0));

   radeon_emit(PKT3(PKT3_WAIT_REG_MEM, 5, 0));
   radeon_emit(WAIT_REG_MEM_EQUAL); /* wait until the register is equal to the reference value */
   radeon_emit(reg_strmout_cntl >> 2); /* register */
   radeon_emit(0);
   radeon_emit(S_0084FC_OFFSET_UPDATE_DONE(1)); /* reference value */
   radeon_emit(S_0084FC_OFFSET_UPDATE_DONE(1)); /* mask */
   radeon_emit(4); /* poll interval */
   radeon_end();
}

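/* Start a streamout session: program the buffer sizes and strides, and
 * initialize each buffer's written-dwords counter, either restored from the
 * saved filled size (append) or reset to the start of the buffer.
 */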
static void si_emit_streamout_begin(struct si_context *sctx, unsigned index)
{
   struct radeon_cmdbuf *cs = &sctx->gfx_cs;
   struct si_streamout_target **t = sctx->streamout.targets;

   if (sctx->gfx_level < GFX11)
      si_flush_vgt_streamout(sctx);

   for (unsigned i = 0; i < sctx->streamout.num_targets; i++) {
      if (!t[i])
         continue;

      t[i]->stride_in_dw = sctx->streamout.stride_in_dw[i];

      if (sctx->gfx_level >= GFX11) {
         if (sctx->streamout.append_bitmask & (1 << i)) {
            /* Restore the register value. */
            si_cp_copy_data(sctx, cs, COPY_DATA_REG, NULL,
                            (R_031088_GDS_STRMOUT_DWORDS_WRITTEN_0 / 4) + i,
                            COPY_DATA_SRC_MEM, t[i]->buf_filled_size,
                            t[i]->buf_filled_size_offset);
         } else {
            /* Set to 0. */
            radeon_begin(cs);
            radeon_set_uconfig_reg(R_031088_GDS_STRMOUT_DWORDS_WRITTEN_0 + i * 4, 0);
            radeon_end();
         }
      } else {
         /* Legacy streamout.
          *
          * The hw binds streamout buffers as shader resources. VGT only counts primitives
          * and tells the shader through SGPRs what to do.
          */
         radeon_begin(cs);
         radeon_set_context_reg_seq(R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16 * i, 2);
         radeon_emit((t[i]->b.buffer_offset + t[i]->b.buffer_size) >> 2); /* BUFFER_SIZE (in DW) */
         radeon_emit(sctx->streamout.stride_in_dw[i]); /* VTX_STRIDE (in DW) */

         if (sctx->streamout.append_bitmask & (1 << i) && t[i]->buf_filled_size_valid) {
            uint64_t va = t[i]->buf_filled_size->gpu_address + t[i]->buf_filled_size_offset;

            /* Append. */
            radeon_emit(PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
            radeon_emit(STRMOUT_SELECT_BUFFER(i) |
                        STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_MEM)); /* control */
            radeon_emit(0); /* unused */
            radeon_emit(0); /* unused */
            radeon_emit(va); /* src address lo */
            radeon_emit(va >> 32); /* src address hi */

            radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, t[i]->buf_filled_size,
                                      RADEON_USAGE_READ | RADEON_PRIO_SO_FILLED_SIZE);
         } else {
            /* Start from the beginning. */
            radeon_emit(PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
            radeon_emit(STRMOUT_SELECT_BUFFER(i) |
                        STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_FROM_PACKET)); /* control */
            radeon_emit(0); /* unused */
            radeon_emit(0); /* unused */
            radeon_emit(t[i]->b.buffer_offset >> 2); /* buffer offset in DW */
            radeon_emit(0); /* unused */
         }
         radeon_end_update_context_roll(sctx);
      }
   }

   sctx->streamout.begin_emitted = true;
}

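/* Stop streamout and save the current filled size of each buffer, so that a
 * later streamout-begin can append to it.
 */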
void si_emit_streamout_end(struct si_context *sctx)
{
   struct radeon_cmdbuf *cs = &sctx->gfx_cs;
   struct si_streamout_target **t = sctx->streamout.targets;

   if (sctx->gfx_level >= GFX11) {
      /* Wait for streamout to finish before reading GDS_STRMOUT registers. */
      sctx->flags |= SI_CONTEXT_VS_PARTIAL_FLUSH;
      si_emit_cache_flush_direct(sctx);
   } else {
      si_flush_vgt_streamout(sctx);
   }

   for (unsigned i = 0; i < sctx->streamout.num_targets; i++) {
      if (!t[i])
         continue;

      if (sctx->gfx_level >= GFX11) {
         si_cp_copy_data(sctx, &sctx->gfx_cs, COPY_DATA_DST_MEM,
                         t[i]->buf_filled_size, t[i]->buf_filled_size_offset,
                         COPY_DATA_REG, NULL,
                         (R_031088_GDS_STRMOUT_DWORDS_WRITTEN_0 >> 2) + i);
         sctx->flags |= SI_CONTEXT_PFP_SYNC_ME;
         si_mark_atom_dirty(sctx, &sctx->atoms.s.cache_flush);
      } else {
         uint64_t va = t[i]->buf_filled_size->gpu_address + t[i]->buf_filled_size_offset;

         radeon_begin(cs);
         radeon_emit(PKT3(PKT3_STRMOUT_BUFFER_UPDATE, 4, 0));
         radeon_emit(STRMOUT_SELECT_BUFFER(i) | STRMOUT_OFFSET_SOURCE(STRMOUT_OFFSET_NONE) |
                     STRMOUT_STORE_BUFFER_FILLED_SIZE); /* control */
         radeon_emit(va); /* dst address lo */
         radeon_emit(va >> 32); /* dst address hi */
         radeon_emit(0); /* unused */
         radeon_emit(0); /* unused */

         /* Zero the buffer size. The counters (primitives generated,
          * primitives emitted) may be enabled even if no buffer is
          * bound. This ensures that the primitives-emitted query
          * won't increment. */
         radeon_set_context_reg(R_028AD0_VGT_STRMOUT_BUFFER_SIZE_0 + 16 * i, 0);
         radeon_end_update_context_roll(sctx);

         radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, t[i]->buf_filled_size,
                                   RADEON_USAGE_WRITE | RADEON_PRIO_SO_FILLED_SIZE);
      }

      t[i]->buf_filled_size_valid = true;
   }

   sctx->streamout.begin_emitted = false;
}

/* STREAMOUT CONFIG DERIVED STATE
 *
 * Streamout must be enabled for the PRIMITIVES_GENERATED query to work.
 * The buffer mask is an independent state, so no writes occur if there
 * are no buffers bound.
 */

static void si_emit_streamout_enable(struct si_context *sctx, unsigned index)
{
   assert(sctx->gfx_level < GFX11);

   radeon_begin(&sctx->gfx_cs);
   radeon_set_context_reg_seq(R_028B94_VGT_STRMOUT_CONFIG, 2);
   radeon_emit(S_028B94_STREAMOUT_0_EN(si_get_strmout_en(sctx)) |
               S_028B94_RAST_STREAM(0) |
               S_028B94_STREAMOUT_1_EN(si_get_strmout_en(sctx)) |
               S_028B94_STREAMOUT_2_EN(si_get_strmout_en(sctx)) |
               S_028B94_STREAMOUT_3_EN(si_get_strmout_en(sctx)));
   radeon_emit(sctx->streamout.hw_enabled_mask & sctx->streamout.enabled_stream_buffers_mask);
   radeon_end();
}

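/* Update the streamout-enable state. hw_enabled_mask replicates the 4-bit
 * buffer-enable mask into all four per-stream fields of
 * VGT_STRMOUT_BUFFER_CONFIG; it's ANDed with the streams the shader actually
 * writes when the register is emitted above.
 */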
static void si_set_streamout_enable(struct si_context *sctx, bool enable)
{
   if (sctx->gfx_level >= GFX11)
      return;

   bool old_strmout_en = si_get_strmout_en(sctx);
   unsigned old_hw_enabled_mask = sctx->streamout.hw_enabled_mask;

   sctx->streamout.streamout_enabled = enable;

   sctx->streamout.hw_enabled_mask =
      sctx->streamout.enabled_mask | (sctx->streamout.enabled_mask << 4) |
      (sctx->streamout.enabled_mask << 8) | (sctx->streamout.enabled_mask << 12);

   if ((old_strmout_en != si_get_strmout_en(sctx)) ||
       (old_hw_enabled_mask != sctx->streamout.hw_enabled_mask))
      si_mark_atom_dirty(sctx, &sctx->atoms.s.streamout_enable);
}

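/* Track active PRIMITIVES_GENERATED queries. On chips where the query
 * requires streamout to be enabled (< GFX11), toggle the streamout-enable
 * state and re-evaluate NGG when the query count changes.
 */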
void si_update_prims_generated_query_state(struct si_context *sctx, unsigned type, int diff)
{
   if (sctx->gfx_level < GFX11 && type == PIPE_QUERY_PRIMITIVES_GENERATED) {
      bool old_strmout_en = si_get_strmout_en(sctx);

      sctx->streamout.num_prims_gen_queries += diff;
      assert(sctx->streamout.num_prims_gen_queries >= 0);

      sctx->streamout.prims_gen_query_enabled = sctx->streamout.num_prims_gen_queries != 0;

      if (old_strmout_en != si_get_strmout_en(sctx))
         si_mark_atom_dirty(sctx, &sctx->atoms.s.streamout_enable);

      if (si_update_ngg(sctx)) {
         si_shader_change_notify(sctx);
         sctx->do_update_shaders = true;
      }
   }
}

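/* Set up the pipe_context hooks and draw atoms for streamout. */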
void si_init_streamout_functions(struct si_context *sctx)
{
   sctx->b.create_stream_output_target = si_create_so_target;
   sctx->b.stream_output_target_destroy = si_so_target_destroy;
   sctx->b.set_stream_output_targets = si_set_streamout_targets;
   sctx->atoms.s.streamout_begin.emit = si_emit_streamout_begin;

   if (sctx->gfx_level < GFX11)
      sctx->atoms.s.streamout_enable.emit = si_emit_streamout_enable;
}