/*
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "ir3/ir3_nir.h"

/* This has to reach into the fd_context a bit more than the rest of
 * ir3, but it needs to be aligned with the compiler so that both agree
 * on which const regs hold what.  The logic is identical between ir3
 * generations; the only differences are small details in the actual
 * CP_LOAD_STATE packets (which are handled inside the generation-
 * specific ctx->emit_const(_bo)() functions).
 *
 * This file should be included in only a single .c file per gen, which
 * defines the following functions:
 */

static bool is_stateobj(struct fd_ringbuffer *ring);

static void emit_const_user(struct fd_ringbuffer *ring,
      const struct ir3_shader_variant *v, uint32_t regid,
      uint32_t size, const uint32_t *user_buffer);

static void emit_const_bo(struct fd_ringbuffer *ring,
      const struct ir3_shader_variant *v, uint32_t regid,
      uint32_t offset, uint32_t size,
      struct fd_bo *bo);

static void emit_const_prsc(struct fd_ringbuffer *ring,
      const struct ir3_shader_variant *v, uint32_t regid,
      uint32_t offset, uint32_t size,
      struct pipe_resource *buffer)
{
   struct fd_resource *rsc = fd_resource(buffer);
   emit_const_bo(ring, v, regid, offset, size, rsc->bo);
}

static void emit_const_ptrs(struct fd_ringbuffer *ring,
      const struct ir3_shader_variant *v, uint32_t dst_offset,
      uint32_t num, struct pipe_resource **prscs, uint32_t *offsets);
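
/* For illustration only: a minimal, hypothetical shape for one of the
 * hooks a gen-specific file provides.  The real per-gen implementations
 * build the appropriate CP_LOAD_STATE packets in emit_const_user()/
 * emit_const_bo()/emit_const_ptrs(); the one assumption sketched here is
 * a generation that never emits const state via CP_SET_DRAW_STATE
 * objects:
 *
 *    static bool
 *    is_stateobj(struct fd_ringbuffer *ring)
 *    {
 *       return false;
 *    }
 */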

static void
emit_const_asserts(struct fd_ringbuffer *ring,
      const struct ir3_shader_variant *v,
      uint32_t regid, uint32_t sizedwords)
{
   assert((regid % 4) == 0);
   assert((sizedwords % 4) == 0);
   assert(regid + sizedwords <= v->constlen * 4);
}

static void
ring_wfi(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
   /* when we emit const state via ring (IB2) we need a WFI, but when
    * it is emitted via stateobj, we don't:
    */
   if (is_stateobj(ring))
      return;

   fd_wfi(batch, ring);
}

/**
 * Indirectly calculates the size of cmdstream needed for
 * ir3_emit_user_consts().  Returns, via the out parameters, the number
 * of packets and the total size of their payload, in bytes (the range
 * start/end fields are byte offsets).
 *
 * The values can be worst-case, ie. some shader variants may not read
 * all consts, etc.
 */
static inline void
ir3_user_consts_size(struct ir3_ubo_analysis_state *state,
      unsigned *packets, unsigned *size)
{
   *packets = *size = 0;

   for (uint32_t i = 0; i < ARRAY_SIZE(state->range); i++) {
      if (state->range[i].start < state->range[i].end) {
         *size += state->range[i].end - state->range[i].start;
         (*packets)++;
      }
   }
}
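
/* Worked example (assumed values): with two enabled ranges covering the
 * byte intervals [0, 64) and [128, 160), the loop above yields
 * *packets = 2 and *size = 64 + 32 = 96 bytes of payload.
 */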

/**
 * Uploads sub-ranges of UBOs to the hardware's constant buffer (UBO access
 * outside of these ranges will be done using full UBO accesses in the
 * shader).
 */
static inline void
ir3_emit_user_consts(struct fd_screen *screen, const struct ir3_shader_variant *v,
      struct fd_ringbuffer *ring, struct fd_constbuf_stateobj *constbuf)
{
   const struct ir3_const_state *const_state = ir3_const_state(v);
   const struct ir3_ubo_analysis_state *state = &const_state->ubo_state;

   for (unsigned i = 0; i < state->num_enabled; i++) {
      assert(!state->range[i].ubo.bindless);
      unsigned ubo = state->range[i].ubo.block;
      if (!(constbuf->enabled_mask & (1 << ubo)))
         continue;
      struct pipe_constant_buffer *cb = &constbuf->cb[ubo];

      uint32_t size = state->range[i].end - state->range[i].start;
      uint32_t offset = cb->buffer_offset + state->range[i].start;

      /* Pre-a6xx, we might have ranges enabled in the shader that aren't
       * used in the binning variant.
       */
      if (16 * v->constlen <= state->range[i].offset)
         continue;

      /* and even if the start of the const buffer is before
       * first_immediate, the end may not be:
       */
      size = MIN2(size, (16 * v->constlen) - state->range[i].offset);

      if (size == 0)
         continue;

      /* things should be aligned to vec4: */
      debug_assert((state->range[i].offset % 16) == 0);
      debug_assert((size % 16) == 0);
      debug_assert((offset % 16) == 0);

      if (cb->user_buffer) {
         emit_const_user(ring, v, state->range[i].offset / 4,
               size / 4, cb->user_buffer + state->range[i].start);
      } else {
         emit_const_prsc(ring, v, state->range[i].offset / 4,
               offset, size / 4, cb->buffer);
      }
   }
}

static inline void
ir3_emit_ubos(struct fd_context *ctx, const struct ir3_shader_variant *v,
      struct fd_ringbuffer *ring, struct fd_constbuf_stateobj *constbuf)
{
   const struct ir3_const_state *const_state = ir3_const_state(v);
   uint32_t offset = const_state->offsets.ubo;

   /* a6xx+ uses UBO state and ldc instead of pointers emitted in
    * const state and ldg:
    */
   if (ctx->screen->gpu_id >= 600)
      return;

   if (v->constlen > offset) {
      uint32_t params = const_state->num_ubos;
      uint32_t offsets[params];
      struct pipe_resource *prscs[params];

      for (uint32_t i = 0; i < params; i++) {
         struct pipe_constant_buffer *cb = &constbuf->cb[i];

         /* If we have user pointers (constbuf 0, aka GL uniforms), upload
          * them to a buffer now, and save it in the constbuf so that we
          * don't have to reupload until they get changed.
          */
         if (cb->user_buffer) {
            struct pipe_context *pctx = &ctx->base;
            u_upload_data(pctx->stream_uploader, 0,
                  cb->buffer_size,
                  64,
                  cb->user_buffer,
                  &cb->buffer_offset, &cb->buffer);
            cb->user_buffer = NULL;
         }

         if ((constbuf->enabled_mask & (1 << i)) && cb->buffer) {
            offsets[i] = cb->buffer_offset;
            prscs[i] = cb->buffer;
         } else {
            offsets[i] = 0;
            prscs[i] = NULL;
         }
      }

      assert(offset * 4 + params <= v->constlen * 4);

      emit_const_ptrs(ring, v, offset * 4, params, prscs, offsets);
   }
}

static inline void
ir3_emit_ssbo_sizes(struct fd_screen *screen, const struct ir3_shader_variant *v,
      struct fd_ringbuffer *ring, struct fd_shaderbuf_stateobj *sb)
{
   const struct ir3_const_state *const_state = ir3_const_state(v);
   uint32_t offset = const_state->offsets.ssbo_sizes;
   if (v->constlen > offset) {
      uint32_t sizes[align(const_state->ssbo_size.count, 4)];
      unsigned mask = const_state->ssbo_size.mask;

      while (mask) {
         unsigned index = u_bit_scan(&mask);
         unsigned off = const_state->ssbo_size.off[index];
         sizes[off] = sb->sb[index].buffer_size;
      }

      emit_const_user(ring, v, offset * 4, ARRAY_SIZE(sizes), sizes);
   }
}

static inline void
ir3_emit_image_dims(struct fd_screen *screen, const struct ir3_shader_variant *v,
      struct fd_ringbuffer *ring, struct fd_shaderimg_stateobj *si)
{
   const struct ir3_const_state *const_state = ir3_const_state(v);
   uint32_t offset = const_state->offsets.image_dims;
   if (v->constlen > offset) {
      uint32_t dims[align(const_state->image_dims.count, 4)];
      unsigned mask = const_state->image_dims.mask;

      while (mask) {
         struct pipe_image_view *img;
         struct fd_resource *rsc;
         unsigned index = u_bit_scan(&mask);
         unsigned off = const_state->image_dims.off[index];

         img = &si->si[index];
         rsc = fd_resource(img->resource);

         dims[off + 0] = util_format_get_blocksize(img->format);
         if (img->resource->target != PIPE_BUFFER) {
            struct fdl_slice *slice =
               fd_resource_slice(rsc, img->u.tex.level);
            /* note for 2d/cube/etc images, even if re-interpreted
             * as a different color format, the pixel size should
             * be the same, so use original dimensions for y and z
             * stride:
             */
            dims[off + 1] = fd_resource_pitch(rsc, img->u.tex.level);
            /* see corresponding logic in fd_resource_offset(): */
            if (rsc->layout.layer_first) {
               dims[off + 2] = rsc->layout.layer_size;
            } else {
               dims[off + 2] = slice->size0;
            }
         } else {
            /* For buffer-backed images, the log2 of the format's
             * bytes-per-pixel is placed in the 2nd slot.  This is useful
             * when emitting image_size instructions, for which we need
             * to divide by bpp for image buffers.  Since the bpp can
             * only be a power of two, the division is implemented as a
             * SHR, and for that it is handy to have the log2 of bpp
             * available as a constant.  (log2 = first-set-bit - 1)
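             *
             * For example (assuming a PIPE_FORMAT_R32G32B32A32_FLOAT
             * buffer image): bpp = 16, so ffs(16) - 1 = 4 == log2(16).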
             */
            dims[off + 1] = ffs(dims[off + 0]) - 1;
         }
      }
      uint32_t size = MIN2(ARRAY_SIZE(dims), v->constlen * 4 - offset * 4);

      emit_const_user(ring, v, offset * 4, size, dims);
   }
}

static inline void
ir3_emit_immediates(struct fd_screen *screen, const struct ir3_shader_variant *v,
      struct fd_ringbuffer *ring)
{
   const struct ir3_const_state *const_state = ir3_const_state(v);
   uint32_t base = const_state->offsets.immediate;
   int size = DIV_ROUND_UP(const_state->immediates_count, 4);

   /* truncate size to avoid writing constants that the shader
    * does not use:
    */
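   /* (Worked example with assumed values: base = 10 vec4s and
    * immediates_count = 8 dwords gives size = 2 vec4s; a variant with
    * constlen = 11 truncates that to a single vec4.)
    */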
   size = MIN2(size + base, v->constlen) - base;

   /* convert out of vec4: */
   base *= 4;
   size *= 4;

   if (size > 0)
      emit_const_user(ring, v, base, size, const_state->immediates);
}

static inline void
ir3_emit_link_map(struct fd_screen *screen,
      const struct ir3_shader_variant *producer,
      const struct ir3_shader_variant *v, struct fd_ringbuffer *ring)
{
   const struct ir3_const_state *const_state = ir3_const_state(v);
   uint32_t base = const_state->offsets.primitive_map;
   int size = DIV_ROUND_UP(v->input_size, 4);

   /* truncate size to avoid writing constants that the shader
    * does not use:
    */
   size = MIN2(size + base, v->constlen) - base;

   /* convert out of vec4: */
   base *= 4;
   size *= 4;

   if (size > 0)
      emit_const_user(ring, v, base, size, producer->output_loc);
}

/* emit stream-out buffers: */
static inline void
emit_tfbos(struct fd_context *ctx, const struct ir3_shader_variant *v,
      struct fd_ringbuffer *ring)
{
   /* streamout addresses after driver-params: */
   const struct ir3_const_state *const_state = ir3_const_state(v);
   uint32_t offset = const_state->offsets.tfbo;
   if (v->constlen > offset) {
      struct fd_streamout_stateobj *so = &ctx->streamout;
      struct ir3_stream_output_info *info = &v->shader->stream_output;
      uint32_t params = 4;
      uint32_t offsets[params];
      struct pipe_resource *prscs[params];

      for (uint32_t i = 0; i < params; i++) {
         struct pipe_stream_output_target *target = so->targets[i];

         if (target) {
            offsets[i] = (so->offsets[i] * info->stride[i] * 4) +
                  target->buffer_offset;
            prscs[i] = target->buffer;
         } else {
            offsets[i] = 0;
            prscs[i] = NULL;
         }
      }

      assert(offset * 4 + params <= v->constlen * 4);

      emit_const_ptrs(ring, v, offset * 4, params, prscs, offsets);
   }
}

static inline uint32_t
max_tf_vtx(struct fd_context *ctx, const struct ir3_shader_variant *v)
{
   struct fd_streamout_stateobj *so = &ctx->streamout;
   struct ir3_stream_output_info *info = &v->shader->stream_output;
   uint32_t maxvtxcnt = 0x7fffffff;

   if (ctx->screen->gpu_id >= 500)
      return 0;
   if (v->binning_pass)
      return 0;
   if (v->shader->stream_output.num_outputs == 0)
      return 0;
   if (so->num_targets == 0)
      return 0;

   /* offset to write to is:
    *
    *   total_vtxcnt = vtxcnt + offsets[i]
    *   offset = total_vtxcnt * stride[i]
    *
    *   offset =   vtxcnt * stride[i]       ; calculated in shader
    *            + offsets[i] * stride[i]   ; calculated at emit_tfbos()
    *
    * assuming for each vtx, each target buffer will have data written
    * up to 'offset + stride[i]', that leaves maxvtxcnt as:
    *
    *   buffer_size = (maxvtxcnt * stride[i]) + stride[i]
    *   maxvtxcnt   = (buffer_size - stride[i]) / stride[i]
    *
    * but the shader is actually doing a less-than (rather than
    * less-than-equal) check, so we can drop the -stride[i].
    *
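    * Worked example (assumed values): a target with buffer_size = 1024
    * bytes bound to an output with stride[i] = 4 dwords (16 bytes) caps
    * maxvtxcnt at 1024 / 16 = 64 vertices.
    *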
    * TODO is assumption about `offset + stride[i]` legit?
    */
   for (unsigned i = 0; i < so->num_targets; i++) {
      struct pipe_stream_output_target *target = so->targets[i];
      unsigned stride = info->stride[i] * 4;   /* convert dwords->bytes */
      if (target) {
         uint32_t max = target->buffer_size / stride;
         maxvtxcnt = MIN2(maxvtxcnt, max);
      }
   }

   return maxvtxcnt;
}

static inline void
emit_common_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
      struct fd_context *ctx, enum pipe_shader_type t)
{
   enum fd_dirty_shader_state dirty = ctx->dirty_shader[t];

   /* When we use CP_SET_DRAW_STATE objects to emit constant state,
    * if we emit any of it we need to emit all.  This is because
    * we are using the same state-group-id each time for uniform
    * state, and if the previous update is never evaluated (due to no
    * visible primitives in the current tile) then the new stateobj
    * completely replaces the old one.
    *
    * Possibly we could avoid this if we split up different parts of
    * the const state into different state objects.
    */
   if (dirty && is_stateobj(ring))
      dirty = ~0;

   if (dirty & (FD_DIRTY_SHADER_PROG | FD_DIRTY_SHADER_CONST)) {
      struct fd_constbuf_stateobj *constbuf;
      bool shader_dirty;

      constbuf = &ctx->constbuf[t];
      shader_dirty = !!(dirty & FD_DIRTY_SHADER_PROG);

      ring_wfi(ctx->batch, ring);

      ir3_emit_user_consts(ctx->screen, v, ring, constbuf);
      ir3_emit_ubos(ctx, v, ring, constbuf);
      if (shader_dirty)
         ir3_emit_immediates(ctx->screen, v, ring);
   }

   if (dirty & (FD_DIRTY_SHADER_PROG | FD_DIRTY_SHADER_SSBO)) {
      struct fd_shaderbuf_stateobj *sb = &ctx->shaderbuf[t];
      ring_wfi(ctx->batch, ring);
      ir3_emit_ssbo_sizes(ctx->screen, v, ring, sb);
   }

   if (dirty & (FD_DIRTY_SHADER_PROG | FD_DIRTY_SHADER_IMAGE)) {
      struct fd_shaderimg_stateobj *si = &ctx->shaderimg[t];
      ring_wfi(ctx->batch, ring);
      ir3_emit_image_dims(ctx->screen, v, ring, si);
   }
}

static inline bool
ir3_needs_vs_driver_params(const struct ir3_shader_variant *v)
{
   const struct ir3_const_state *const_state = ir3_const_state(v);
   uint32_t offset = const_state->offsets.driver_param;

   return v->constlen > offset;
}

static inline void
ir3_emit_vs_driver_params(const struct ir3_shader_variant *v,
      struct fd_ringbuffer *ring, struct fd_context *ctx,
      const struct pipe_draw_info *info)
{
   debug_assert(ir3_needs_vs_driver_params(v));

   const struct ir3_const_state *const_state = ir3_const_state(v);
   uint32_t offset = const_state->offsets.driver_param;
   uint32_t vertex_params[IR3_DP_VS_COUNT] = {
      [IR3_DP_DRAWID] = 0,   /* filled by hw (CP_DRAW_INDIRECT_MULTI) */
      [IR3_DP_VTXID_BASE] = info->index_size ?
            info->index_bias : info->start,
      [IR3_DP_INSTID_BASE] = info->start_instance,
      [IR3_DP_VTXCNT_MAX] = max_tf_vtx(ctx, v),
   };
   if (v->key.ucp_enables) {
      struct pipe_clip_state *ucp = &ctx->ucp;
      unsigned pos = IR3_DP_UCP0_X;
      for (unsigned i = 0; pos <= IR3_DP_UCP7_W; i++) {
         for (unsigned j = 0; j < 4; j++) {
            vertex_params[pos] = fui(ucp->ucp[i][j]);
            pos++;
         }
      }
   }

   /* Only emit as many params as needed, i.e. up to the highest enabled
    * UCP plane.  However a binning pass may drop even some of these, so
    * limit to program max.
    */
   const uint32_t vertex_params_size = MIN2(
         const_state->num_driver_params,
         (v->constlen - offset) * 4);
   assert(vertex_params_size <= IR3_DP_VS_COUNT);

   bool needs_vtxid_base =
      ir3_find_sysval_regid(v, SYSTEM_VALUE_VERTEX_ID_ZERO_BASE) != regid(63, 0);
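   /* (regid(63, 0) is ir3's "invalid/unused" register sentinel, returned
    * when the variant does not read the sysval.)
    */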

   /* For indirect draw, we need to copy VTXID_BASE from the
    * indirect-draw parameters buffer, which is annoying and means we
    * can't easily emit these consts in the cmdstream, so we need to
    * copy them to a bo instead.
    */
   if (info->indirect && needs_vtxid_base) {
      struct pipe_draw_indirect_info *indirect = info->indirect;
      struct pipe_resource *vertex_params_rsc =
            pipe_buffer_create(&ctx->screen->base,
                  PIPE_BIND_CONSTANT_BUFFER, PIPE_USAGE_STREAM,
                  vertex_params_size * 4);
      unsigned src_off = info->indirect->offset;
      void *ptr;

      ptr = fd_bo_map(fd_resource(vertex_params_rsc)->bo);
      memcpy(ptr, vertex_params, vertex_params_size * 4);

      if (info->index_size) {
         /* indexed draw, index_bias is 4th field: */
         src_off += 3 * 4;
      } else {
         /* non-indexed draw, start is 3rd field: */
         src_off += 2 * 4;
      }
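
      /* (The buffer layouts indexed here match the GL indirect-draw
       * structs, ie.:
       *    DrawArraysIndirectCommand   { count, instanceCount, first,
       *                                  baseInstance }
       *    DrawElementsIndirectCommand { count, instanceCount, firstIndex,
       *                                  baseVertex, baseInstance }
       * so 'first' is the 3rd dword and 'baseVertex' the 4th.)
       */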

      /* copy index_bias or start from draw params: */
      ctx->screen->mem_to_mem(ring, vertex_params_rsc, 0,
            indirect->buffer, src_off, 1);

      emit_const_prsc(ring, v, offset * 4, 0,
            vertex_params_size, vertex_params_rsc);

      pipe_resource_reference(&vertex_params_rsc, NULL);
   } else {
      emit_const_user(ring, v, offset * 4,
            vertex_params_size, vertex_params);
   }

   /* if needed, emit stream-out buffer addresses: */
   if (vertex_params[IR3_DP_VTXCNT_MAX] > 0) {
      emit_tfbos(ctx, v, ring);
   }
}

static inline void
ir3_emit_vs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
      struct fd_context *ctx, const struct pipe_draw_info *info)
{
   debug_assert(v->type == MESA_SHADER_VERTEX);

   emit_common_consts(v, ring, ctx, PIPE_SHADER_VERTEX);

   /* emit driver params every time: */
   if (info && ir3_needs_vs_driver_params(v)) {
      ring_wfi(ctx->batch, ring);
      ir3_emit_vs_driver_params(v, ring, ctx, info);
   }
}

static inline void
ir3_emit_fs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
      struct fd_context *ctx)
{
   debug_assert(v->type == MESA_SHADER_FRAGMENT);

   emit_common_consts(v, ring, ctx, PIPE_SHADER_FRAGMENT);
}

/* emit compute-shader consts: */
static inline void
ir3_emit_cs_consts(const struct ir3_shader_variant *v, struct fd_ringbuffer *ring,
      struct fd_context *ctx, const struct pipe_grid_info *info)
{
   debug_assert(gl_shader_stage_is_compute(v->type));

   emit_common_consts(v, ring, ctx, PIPE_SHADER_COMPUTE);

   /* emit compute-shader driver-params: */
   const struct ir3_const_state *const_state = ir3_const_state(v);
   uint32_t offset = const_state->offsets.driver_param;
   if (v->constlen > offset) {
      ring_wfi(ctx->batch, ring);

      if (info->indirect) {
         struct pipe_resource *indirect = NULL;
         unsigned indirect_offset;

         /* This is a bit awkward, but CP_LOAD_STATE.EXT_SRC_ADDR needs
          * to be aligned more strictly than 4 bytes.  So in this case
          * we need a temporary buffer to copy NumWorkGroups.xyz to.
          *
          * TODO if a previous compute job is writing to info->indirect,
          * we might need a WFI.. but since we currently flush for each
          * compute job, we are probably ok for now.
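          *
          * Example (assumed value): info->indirect_offset = 0x104 is
          * only 4-byte aligned, so the three NumWorkGroups dwords get
          * staged through a scratch buffer at offset 0 instead.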
          */
         if (info->indirect_offset & 0xf) {
            indirect = pipe_buffer_create(&ctx->screen->base,
                  PIPE_BIND_COMMAND_ARGS_BUFFER, PIPE_USAGE_STREAM,
                  0x1000);
            indirect_offset = 0;

            ctx->screen->mem_to_mem(ring, indirect, 0, info->indirect,
                  info->indirect_offset, 3);
         } else {
            pipe_resource_reference(&indirect, info->indirect);
            indirect_offset = info->indirect_offset;
         }

         emit_const_prsc(ring, v, offset * 4, indirect_offset, 16, indirect);

         pipe_resource_reference(&indirect, NULL);
      } else {
         uint32_t compute_params[IR3_DP_CS_COUNT] = {
            [IR3_DP_NUM_WORK_GROUPS_X] = info->grid[0],
            [IR3_DP_NUM_WORK_GROUPS_Y] = info->grid[1],
            [IR3_DP_NUM_WORK_GROUPS_Z] = info->grid[2],
            [IR3_DP_LOCAL_GROUP_SIZE_X] = info->block[0],
            [IR3_DP_LOCAL_GROUP_SIZE_Y] = info->block[1],
            [IR3_DP_LOCAL_GROUP_SIZE_Z] = info->block[2],
         };
         uint32_t size = MIN2(const_state->num_driver_params,
               v->constlen * 4 - offset * 4);

         emit_const_user(ring, v, offset * 4, size, compute_params);
      }
   }
}