/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 * Copyright © 2018 Google, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#define FD_BO_NO_HARDPIN 1

#include "pipe/p_state.h"
#include "util/u_memory.h"
#include "util/u_prim.h"
#include "util/u_string.h"

#include "freedreno_blitter.h"
#include "freedreno_resource.h"
#include "freedreno_state.h"

#include "fd6_barrier.h"
#include "fd6_blitter.h"
#include "fd6_context.h"
#include "fd6_draw.h"
#include "fd6_emit.h"
#include "fd6_program.h"
#include "fd6_vsc.h"
#include "fd6_zsa.h"

#include "fd6_pack.h"

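/* The draw path is specialized at compile time on the draw type.  The
 * enum is ordered so that all indirect variants come after
 * DRAW_INDIRECT_OP_XFB, which is_indirect() below relies on.
 */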
enum draw_type {
   DRAW_DIRECT_OP_NORMAL,
   DRAW_DIRECT_OP_INDEXED,
   DRAW_INDIRECT_OP_XFB,
   DRAW_INDIRECT_OP_INDIRECT_COUNT_INDEXED,
   DRAW_INDIRECT_OP_INDIRECT_COUNT,
   DRAW_INDIRECT_OP_INDEXED,
   DRAW_INDIRECT_OP_NORMAL,
};

static inline bool
is_indirect(enum draw_type type)
{
   return type >= DRAW_INDIRECT_OP_XFB;
}

static inline bool
is_indexed(enum draw_type type)
{
   switch (type) {
   case DRAW_DIRECT_OP_INDEXED:
   case DRAW_INDIRECT_OP_INDIRECT_COUNT_INDEXED:
   case DRAW_INDIRECT_OP_INDEXED:
      return true;
   default:
      return false;
   }
}

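/* Emit a CP_DRAW_AUTO packet for transform-feedback draws: the GPU derives
 * the vertex count from the stream-output byte counter and buffer stride,
 * rather than from a CPU-supplied count.
 */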
static void
draw_emit_xfb(struct fd_ringbuffer *ring, struct CP_DRAW_INDX_OFFSET_0 *draw0,
              const struct pipe_draw_info *info,
              const struct pipe_draw_indirect_info *indirect)
{
   struct fd_stream_output_target *target =
      fd_stream_output_target(indirect->count_from_stream_output);
   struct fd_resource *offset = fd_resource(target->offset_buf);

   OUT_PKT7(ring, CP_DRAW_AUTO, 6);
   OUT_RING(ring, pack_CP_DRAW_INDX_OFFSET_0(*draw0).value);
   OUT_RING(ring, info->instance_count);
   OUT_RELOC(ring, offset->bo, 0, 0, 0);
   OUT_RING(ring, 0); /* byte counter offset, subtracted from the value read above */
   OUT_RING(ring, target->stride);
}

static inline unsigned
max_indices(const struct pipe_draw_info *info, unsigned index_offset)
{
   struct pipe_resource *idx = info->index.resource;

   assert((info->index_size == 1) ||
          (info->index_size == 2) ||
          (info->index_size == 4));

   /* Conceptually we divide by the index_size. But if we had
    * log2(index_size) we could convert that into a right-shift
    * instead. Conveniently the index_size will only be 1, 2,
    * or 4. And dividing by two (right-shift by one) gives us
    * the same answer for those three values. So instead of
    * divide we can do two right-shifts.
    */
   unsigned index_size_shift = info->index_size >> 1;
   return (idx->width0 - index_offset) >> index_size_shift;
}

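/* Emit a CP_DRAW_INDIRECT_MULTI packet.  The opcode and packet length
 * depend on the draw type: indexed draws add the index buffer VA and max
 * index count, and "indirect count" draws add a reloc for the buffer
 * holding the actual draw count.
 */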
template <draw_type DRAW>
static void
draw_emit_indirect(struct fd_context *ctx,
                   struct fd_ringbuffer *ring,
                   struct CP_DRAW_INDX_OFFSET_0 *draw0,
                   const struct pipe_draw_info *info,
                   const struct pipe_draw_indirect_info *indirect,
                   unsigned index_offset, uint32_t driver_param)
{
   struct fd_resource *ind = fd_resource(indirect->buffer);

   if (DRAW == DRAW_INDIRECT_OP_INDIRECT_COUNT_INDEXED) {
      OUT_PKT7(ring, CP_DRAW_INDIRECT_MULTI, 11);
      OUT_RING(ring, pack_CP_DRAW_INDX_OFFSET_0(*draw0).value);
      OUT_RING(ring,
               (A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE(INDIRECT_OP_INDIRECT_COUNT_INDEXED)
                | A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF(driver_param)));
      struct fd_resource *count_buf = fd_resource(indirect->indirect_draw_count);
      struct pipe_resource *idx = info->index.resource;
      OUT_RING(ring, indirect->draw_count);
      OUT_RELOC(ring, fd_resource(idx)->bo, index_offset, 0, 0);
      OUT_RING(ring, max_indices(info, index_offset));
      OUT_RELOC(ring, ind->bo, indirect->offset, 0, 0);
      OUT_RELOC(ring, count_buf->bo, indirect->indirect_draw_count_offset, 0, 0);
      OUT_RING(ring, indirect->stride);
   } else if (DRAW == DRAW_INDIRECT_OP_INDEXED) {
      OUT_PKT7(ring, CP_DRAW_INDIRECT_MULTI, 9);
      OUT_RING(ring, pack_CP_DRAW_INDX_OFFSET_0(*draw0).value);
      OUT_RING(ring,
               (A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE(INDIRECT_OP_INDEXED)
                | A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF(driver_param)));
      struct pipe_resource *idx = info->index.resource;
      OUT_RING(ring, indirect->draw_count);
      /* index buffer VA: */
      OUT_RELOC(ring, fd_resource(idx)->bo, index_offset, 0, 0);
      /* max indices: */
      OUT_RING(ring, max_indices(info, index_offset));
      OUT_RELOC(ring, ind->bo, indirect->offset, 0, 0);
      OUT_RING(ring, indirect->stride);
   } else if (DRAW == DRAW_INDIRECT_OP_INDIRECT_COUNT) {
      OUT_PKT7(ring, CP_DRAW_INDIRECT_MULTI, 8);
      OUT_RING(ring, pack_CP_DRAW_INDX_OFFSET_0(*draw0).value);
      OUT_RING(ring,
               (A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE(INDIRECT_OP_INDIRECT_COUNT)
                | A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF(driver_param)));
      struct fd_resource *count_buf = fd_resource(indirect->indirect_draw_count);
      OUT_RING(ring, indirect->draw_count);
      OUT_RELOC(ring, ind->bo, indirect->offset, 0, 0);
      OUT_RELOC(ring, count_buf->bo, indirect->indirect_draw_count_offset, 0, 0);
      OUT_RING(ring, indirect->stride);
   } else if (DRAW == DRAW_INDIRECT_OP_NORMAL) {
      OUT_PKT7(ring, CP_DRAW_INDIRECT_MULTI, 6);
      OUT_RING(ring, pack_CP_DRAW_INDX_OFFSET_0(*draw0).value);
      OUT_RING(ring,
               (A6XX_CP_DRAW_INDIRECT_MULTI_1_OPCODE(INDIRECT_OP_NORMAL)
                | A6XX_CP_DRAW_INDIRECT_MULTI_1_DST_OFF(driver_param)));
      OUT_RING(ring, indirect->draw_count);
      OUT_RELOC(ring, ind->bo, indirect->offset, 0, 0);
      OUT_RING(ring, indirect->stride);
   }
}

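/* Emit a direct (non-indirect) draw, using CP_DRAW_INDX_OFFSET with the
 * index buffer and max index count for indexed draws:
 */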
template <draw_type DRAW>
static void
draw_emit(struct fd_ringbuffer *ring, struct CP_DRAW_INDX_OFFSET_0 *draw0,
          const struct pipe_draw_info *info,
          const struct pipe_draw_start_count_bias *draw, unsigned index_offset)
{
   if (DRAW == DRAW_DIRECT_OP_INDEXED) {
      assert(!info->has_user_indices);

      struct pipe_resource *idx_buffer = info->index.resource;

      OUT_PKT(ring, CP_DRAW_INDX_OFFSET, pack_CP_DRAW_INDX_OFFSET_0(*draw0),
              CP_DRAW_INDX_OFFSET_1(.num_instances = info->instance_count),
              CP_DRAW_INDX_OFFSET_2(.num_indices = draw->count),
              CP_DRAW_INDX_OFFSET_3(.first_indx = draw->start),
              A5XX_CP_DRAW_INDX_OFFSET_INDX_BASE(fd_resource(idx_buffer)->bo,
                                                 index_offset),
              A5XX_CP_DRAW_INDX_OFFSET_6(.max_indices = max_indices(info, index_offset)));
   } else if (DRAW == DRAW_DIRECT_OP_NORMAL) {
      OUT_PKT(ring, CP_DRAW_INDX_OFFSET, pack_CP_DRAW_INDX_OFFSET_0(*draw0),
              CP_DRAW_INDX_OFFSET_1(.num_instances = info->instance_count),
              CP_DRAW_INDX_OFFSET_2(.num_indices = draw->count));
   }
}

static void
fixup_draw_state(struct fd_context *ctx, struct fd6_emit *emit) assert_dt
{
   if (ctx->last.dirty ||
       (ctx->last.primitive_restart != emit->primitive_restart)) {
      /* rasterizer state is affected by primitive-restart: */
      fd_context_dirty(ctx, FD_DIRTY_RASTERIZER);
      ctx->last.primitive_restart = emit->primitive_restart;
   }
}

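/* Construct the shader cache key from current state and look up (or
 * compile) the program state.  Only called when something affecting the
 * key may have changed:
 */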
template <fd6_pipeline_type PIPELINE>
static const struct fd6_program_state *
get_program_state(struct fd_context *ctx, const struct pipe_draw_info *info)
   assert_dt
{
   struct fd6_context *fd6_ctx = fd6_context(ctx);
   struct ir3_cache_key key = {
      .vs = (struct ir3_shader_state *)ctx->prog.vs,
      .gs = (struct ir3_shader_state *)ctx->prog.gs,
      .fs = (struct ir3_shader_state *)ctx->prog.fs,
      .clip_plane_enable = ctx->rasterizer->clip_plane_enable,
      .patch_vertices = (PIPELINE == HAS_TESS_GS) ? ctx->patch_vertices : 0,
   };

   /* Some gcc versions get confused about designated order, so work around
    * it by not initializing these inline:
    */
   key.key.ucp_enables = ctx->rasterizer->clip_plane_enable;
   key.key.sample_shading = (ctx->min_samples > 1);
   key.key.msaa = (ctx->framebuffer.samples > 1);
   key.key.rasterflat = ctx->rasterizer->flatshade;

   if (PIPELINE == HAS_TESS_GS) {
      if (info->mode == MESA_PRIM_PATCHES) {
         struct shader_info *gs_info =
            ir3_get_shader_info((struct ir3_shader_state *)ctx->prog.gs);

         key.hs = (struct ir3_shader_state *)ctx->prog.hs;
         key.ds = (struct ir3_shader_state *)ctx->prog.ds;

         struct shader_info *ds_info = ir3_get_shader_info(key.ds);
         key.key.tessellation = ir3_tess_mode(ds_info->tess._primitive_mode);

         struct shader_info *fs_info = ir3_get_shader_info(key.fs);
         key.key.tcs_store_primid =
            BITSET_TEST(ds_info->system_values_read, SYSTEM_VALUE_PRIMITIVE_ID) ||
            (gs_info && BITSET_TEST(gs_info->system_values_read, SYSTEM_VALUE_PRIMITIVE_ID)) ||
            (fs_info && (fs_info->inputs_read & (1ull << VARYING_SLOT_PRIMITIVE_ID)));
      }

      if (key.gs) {
         key.key.has_gs = true;
      }
   }

   ir3_fixup_shader_state(&ctx->base, &key.key);

   if (ctx->gen_dirty & BIT(FD6_GROUP_PROG)) {
      struct ir3_program_state *s = ir3_cache_lookup(
         ctx->shader_cache, &key, &ctx->debug);
      fd6_ctx->prog = fd6_program_state(s);
   }

   return fd6_ctx->prog;
}

static void
flush_streamout(struct fd_context *ctx, struct fd6_emit *emit)
   assert_dt
{
   if (!emit->streamout_mask)
      return;

   struct fd_ringbuffer *ring = ctx->batch->draw;

   for (unsigned i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
      if (emit->streamout_mask & (1 << i)) {
         enum vgt_event_type evt = (enum vgt_event_type)(FLUSH_SO_0 + i);
         fd6_event_write(ctx->batch, ring, evt, false);
      }
   }
}

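/* Shared implementation for all draw paths.  The template parameters let
 * the compiler specialize out the tess/GS and indirect-draw handling for
 * the common direct-draw case:
 */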
template <chip CHIP, fd6_pipeline_type PIPELINE, draw_type DRAW>
static void
draw_vbos(struct fd_context *ctx, const struct pipe_draw_info *info,
          unsigned drawid_offset,
          const struct pipe_draw_indirect_info *indirect,
          const struct pipe_draw_start_count_bias *draws,
          unsigned num_draws,
          unsigned index_offset)
   assert_dt
{
   struct fd6_context *fd6_ctx = fd6_context(ctx);
   struct fd6_emit emit;

   emit.ctx = ctx;
   emit.info = info;
   emit.indirect = indirect;
   emit.draw = NULL;
   emit.rasterflat = ctx->rasterizer->flatshade;
   emit.sprite_coord_enable = ctx->rasterizer->sprite_coord_enable;
   emit.sprite_coord_mode = ctx->rasterizer->sprite_coord_mode;
   emit.primitive_restart = info->primitive_restart && is_indexed(DRAW);
   emit.state.num_groups = 0;
   emit.streamout_mask = 0;
   emit.prog = NULL;
   emit.draw_id = 0;

   if (!(ctx->prog.vs && ctx->prog.fs))
      return;

   if (PIPELINE == HAS_TESS_GS) {
      if ((info->mode == MESA_PRIM_PATCHES) || ctx->prog.gs) {
         ctx->gen_dirty |= BIT(FD6_GROUP_PRIMITIVE_PARAMS);
      }
   }

   if ((PIPELINE == NO_TESS_GS) && !is_indirect(DRAW)) {
      fd6_vsc_update_sizes(ctx->batch, info, &draws[0]);
   }

   /* If PROG state (which will mark PROG_KEY dirty) or any state that the
    * key depends on is dirty, then we actually need to construct the shader
    * key, figure out if we need a new variant, and look up the PROG state.
    * Otherwise we can just use the previous prog state.
    */
   if (unlikely(ctx->gen_dirty & BIT(FD6_GROUP_PROG_KEY))) {
      emit.prog = get_program_state<PIPELINE>(ctx, info);
   } else {
      emit.prog = fd6_ctx->prog;
   }

   /* bail if compile failed: */
   if (!emit.prog)
      return;

   fixup_draw_state(ctx, &emit);

   /* *after* fixup_shader_state(): */
   emit.dirty_groups = ctx->gen_dirty;

   emit.vs = fd6_emit_get_prog(&emit)->vs;
   if (PIPELINE == HAS_TESS_GS) {
      emit.hs = fd6_emit_get_prog(&emit)->hs;
      emit.ds = fd6_emit_get_prog(&emit)->ds;
      emit.gs = fd6_emit_get_prog(&emit)->gs;
   }
   emit.fs = fd6_emit_get_prog(&emit)->fs;

   if (emit.prog->num_driver_params || fd6_ctx->has_dp_state) {
      emit.draw = &draws[0];
      emit.dirty_groups |= BIT(FD6_GROUP_DRIVER_PARAMS);
   }

   /* If we are doing xfb, we need to emit the xfb state on every draw: */
   if (emit.prog->stream_output)
      emit.dirty_groups |= BIT(FD6_GROUP_SO);

   if (unlikely(ctx->stats_users > 0)) {
      ctx->stats.vs_regs += ir3_shader_halfregs(emit.vs);
      if (PIPELINE == HAS_TESS_GS) {
         ctx->stats.hs_regs += COND(emit.hs, ir3_shader_halfregs(emit.hs));
         ctx->stats.ds_regs += COND(emit.ds, ir3_shader_halfregs(emit.ds));
         ctx->stats.gs_regs += COND(emit.gs, ir3_shader_halfregs(emit.gs));
      }
      ctx->stats.fs_regs += ir3_shader_halfregs(emit.fs);
   }

   struct fd_ringbuffer *ring = ctx->batch->draw;

   struct CP_DRAW_INDX_OFFSET_0 draw0 = {
      .prim_type = ctx->screen->primtypes[info->mode],
      .vis_cull = USE_VISIBILITY,
      .gs_enable = !!ctx->prog.gs,
   };

   if (DRAW == DRAW_INDIRECT_OP_XFB) {
      draw0.source_select = DI_SRC_SEL_AUTO_XFB;
   } else if (DRAW == DRAW_DIRECT_OP_INDEXED ||
              DRAW == DRAW_INDIRECT_OP_INDIRECT_COUNT_INDEXED ||
              DRAW == DRAW_INDIRECT_OP_INDEXED) {
      draw0.source_select = DI_SRC_SEL_DMA;
      draw0.index_size = fd4_size2indextype(info->index_size);
   } else {
      draw0.source_select = DI_SRC_SEL_AUTO_INDEX;
   }

   if ((PIPELINE == HAS_TESS_GS) && (info->mode == MESA_PRIM_PATCHES)) {
      struct shader_info *ds_info =
         ir3_get_shader_info((struct ir3_shader_state *)ctx->prog.ds);
      unsigned tessellation = ir3_tess_mode(ds_info->tess._primitive_mode);

      uint32_t factor_stride = ir3_tess_factor_stride(tessellation);

      STATIC_ASSERT(IR3_TESS_ISOLINES == TESS_ISOLINES + 1);
      STATIC_ASSERT(IR3_TESS_TRIANGLES == TESS_TRIANGLES + 1);
      STATIC_ASSERT(IR3_TESS_QUADS == TESS_QUADS + 1);
      draw0.patch_type = (enum a6xx_patch_type)(tessellation - 1);

      draw0.prim_type = (enum pc_di_primtype)(DI_PT_PATCHES0 + ctx->patch_vertices);
      draw0.tess_enable = true;

      /* maximum number of patches that can fit in tess factor/param buffers */
      uint32_t subdraw_size = MIN2(FD6_TESS_FACTOR_SIZE / factor_stride,
                                   FD6_TESS_PARAM_SIZE / (emit.hs->output_size * 4));
      /* convert from # of patches to draw count */
      subdraw_size *= ctx->patch_vertices;

      OUT_PKT7(ring, CP_SET_SUBDRAW_SIZE, 1);
      OUT_RING(ring, subdraw_size);

      ctx->batch->tessellation = true;
   }

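   /* ctx->last tracks the most recently emitted values for these registers,
    * so redundant writes can be skipped when nothing changed since the
    * previous draw:
    */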
   uint32_t index_start = is_indexed(DRAW) ? draws[0].index_bias : draws[0].start;
   if (ctx->last.dirty || (ctx->last.index_start != index_start)) {
      OUT_PKT4(ring, REG_A6XX_VFD_INDEX_OFFSET, 1);
      OUT_RING(ring, index_start); /* VFD_INDEX_OFFSET */
      ctx->last.index_start = index_start;
   }

   if (ctx->last.dirty || (ctx->last.instance_start != info->start_instance)) {
      OUT_PKT4(ring, REG_A6XX_VFD_INSTANCE_START_OFFSET, 1);
      OUT_RING(ring, info->start_instance); /* VFD_INSTANCE_START_OFFSET */
      ctx->last.instance_start = info->start_instance;
   }

   uint32_t restart_index =
      info->primitive_restart ? info->restart_index : 0xffffffff;
   if (ctx->last.dirty || (ctx->last.restart_index != restart_index)) {
      OUT_PKT4(ring, REG_A6XX_PC_RESTART_INDEX, 1);
      OUT_RING(ring, restart_index); /* PC_RESTART_INDEX */
      ctx->last.restart_index = restart_index;
   }

   if (emit.dirty_groups)
      fd6_emit_3d_state<CHIP, PIPELINE>(ring, &emit);

   /* All known firmware versions do not wait for WFIs with CP_DRAW_AUTO.
    * Plus, for the common case where the counter buffer is written by
    * vkCmdEndTransformFeedback, we need to wait for the CP_WAIT_MEM_WRITES to
    * complete which means we need a WAIT_FOR_ME anyway.
    *
    * Also, on some firmwares CP_DRAW_INDIRECT_MULTI waits for WFIs before
    * reading the draw parameters but after reading the count, so commands
    * that use indirect draw count need a WFM anyway.
    */
   if (DRAW == DRAW_INDIRECT_OP_XFB ||
       DRAW == DRAW_INDIRECT_OP_INDIRECT_COUNT_INDEXED ||
       DRAW == DRAW_INDIRECT_OP_INDIRECT_COUNT)
      ctx->batch->barrier |= FD6_WAIT_FOR_ME;

   if (ctx->batch->barrier)
      fd6_barrier_flush(ctx->batch);

   /* For debug after a lockup, write a unique counter value to scratch7 for
    * each draw, to make it easier to match up register dumps to cmdstream.
    * The combination of IB (scratch6) and DRAW is enough to "triangulate"
    * the particular draw that caused the lockup.
    */
   emit_marker6(ring, 7);

   if (is_indirect(DRAW)) {
      assert(num_draws == 1); /* only >1 for direct draws */
      if (DRAW == DRAW_INDIRECT_OP_XFB) {
         draw_emit_xfb(ring, &draw0, info, indirect);
      } else {
         const struct ir3_const_state *const_state = ir3_const_state(emit.vs);
         uint32_t dst_offset_dp = const_state->offsets.driver_param;

         /* If unused, pass 0 for DST_OFF: */
         if (dst_offset_dp > emit.vs->constlen)
            dst_offset_dp = 0;

         draw_emit_indirect<DRAW>(ctx, ring, &draw0, info, indirect, index_offset, dst_offset_dp);
      }
   } else {
      draw_emit<DRAW>(ring, &draw0, info, &draws[0], index_offset);

      if (unlikely(num_draws > 1)) {

         /*
          * Most state won't need to be re-emitted, other than xfb and
          * driver-params:
          */
         emit.dirty_groups = 0;

         if (emit.prog->num_driver_params)
            emit.dirty_groups |= BIT(FD6_GROUP_DRIVER_PARAMS);

         if (emit.prog->stream_output)
            emit.dirty_groups |= BIT(FD6_GROUP_SO);

         uint32_t last_index_start = ctx->last.index_start;

         for (unsigned i = 1; i < num_draws; i++) {
            flush_streamout(ctx, &emit);

            fd6_vsc_update_sizes(ctx->batch, info, &draws[i]);

            uint32_t index_start = is_indexed(DRAW) ? draws[i].index_bias : draws[i].start;
            if (last_index_start != index_start) {
               OUT_PKT4(ring, REG_A6XX_VFD_INDEX_OFFSET, 1);
               OUT_RING(ring, index_start); /* VFD_INDEX_OFFSET */
               last_index_start = index_start;
            }

            if (emit.dirty_groups) {
               emit.state.num_groups = 0;
               emit.draw = &draws[i];
               emit.draw_id = info->increment_draw_id ? i : 0;
               fd6_emit_3d_state<CHIP, PIPELINE>(ring, &emit);
            }

            assert(!index_offset); /* handled by util_draw_multi() */

            draw_emit<DRAW>(ring, &draw0, info, &draws[i], 0);
         }

         ctx->last.index_start = last_index_start;
      }
   }

   emit_marker6(ring, 7);

   flush_streamout(ctx, &emit);

   fd_context_all_clean(ctx);
}

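/* Dispatch to the right draw_vbos<> instantiation, so that the per-draw-type
 * branches are resolved at compile time:
 */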
template <chip CHIP, fd6_pipeline_type PIPELINE>
static void
fd6_draw_vbos(struct fd_context *ctx, const struct pipe_draw_info *info,
              unsigned drawid_offset,
              const struct pipe_draw_indirect_info *indirect,
              const struct pipe_draw_start_count_bias *draws,
              unsigned num_draws,
              unsigned index_offset)
   assert_dt
{
   /* Non-indirect case is where we are more likely to see a high draw rate: */
   if (likely(!indirect)) {
      if (info->index_size) {
         draw_vbos<CHIP, PIPELINE, DRAW_DIRECT_OP_INDEXED>(
            ctx, info, drawid_offset, NULL, draws, num_draws, index_offset);
      } else {
         draw_vbos<CHIP, PIPELINE, DRAW_DIRECT_OP_NORMAL>(
            ctx, info, drawid_offset, NULL, draws, num_draws, index_offset);
      }
   } else if (indirect->count_from_stream_output) {
      draw_vbos<CHIP, PIPELINE, DRAW_INDIRECT_OP_XFB>(
         ctx, info, drawid_offset, indirect, draws, num_draws, index_offset);
   } else if (indirect->indirect_draw_count && info->index_size) {
      draw_vbos<CHIP, PIPELINE, DRAW_INDIRECT_OP_INDIRECT_COUNT_INDEXED>(
         ctx, info, drawid_offset, indirect, draws, num_draws, index_offset);
   } else if (indirect->indirect_draw_count) {
      draw_vbos<CHIP, PIPELINE, DRAW_INDIRECT_OP_INDIRECT_COUNT>(
         ctx, info, drawid_offset, indirect, draws, num_draws, index_offset);
   } else if (info->index_size) {
      draw_vbos<CHIP, PIPELINE, DRAW_INDIRECT_OP_INDEXED>(
         ctx, info, drawid_offset, indirect, draws, num_draws, index_offset);
   } else {
      draw_vbos<CHIP, PIPELINE, DRAW_INDIRECT_OP_NORMAL>(
         ctx, info, drawid_offset, indirect, draws, num_draws, index_offset);
   }
}

template <chip CHIP>
static void
fd6_update_draw(struct fd_context *ctx)
{
   const uint32_t gs_tess_stages = BIT(MESA_SHADER_TESS_CTRL) |
      BIT(MESA_SHADER_TESS_EVAL) | BIT(MESA_SHADER_GEOMETRY);

   if (ctx->bound_shader_stages & gs_tess_stages) {
      ctx->draw_vbos = fd6_draw_vbos<CHIP, HAS_TESS_GS>;
   } else {
      ctx->draw_vbos = fd6_draw_vbos<CHIP, NO_TESS_GS>;
   }
}

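/* An LRZ fast-clear is only possible if depth is being cleared and the
 * depth/stencil resource actually has an LRZ buffer allocated:
 */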
static bool
do_lrz_clear(struct fd_context *ctx, enum fd_buffer_mask buffers)
{
   struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;

   if (!pfb->zsbuf)
      return false;

   struct fd_resource *zsbuf = fd_resource(pfb->zsbuf->texture);

   return (buffers & FD_BUFFER_DEPTH) && zsbuf->lrz;
}

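/* Clears are normally recorded on the current subpass and applied as
 * fast-clears when the tile pass is set up; MSAA clears fall back to the
 * 3d pipe via u_blitter:
 */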
static bool
fd6_clear(struct fd_context *ctx, enum fd_buffer_mask buffers,
          const union pipe_color_union *color, double depth,
          unsigned stencil) assert_dt
{
   struct pipe_framebuffer_state *pfb = &ctx->batch->framebuffer;
   struct fd_batch_subpass *subpass = ctx->batch->subpass;
   unsigned color_buffers = buffers >> 2;

   if (pfb->samples > 1) {
      /* we need to do multisample clear on 3d pipe, so fall back to
       * u_blitter.  But we do this ourselves so that we can still benefit
       * from LRZ, as normally zfunc==ALWAYS would invalidate LRZ.  So we
       * want to mark the LRZ state as valid *after* the fallback clear.
       */
      fd_blitter_clear(&ctx->base, (unsigned)buffers, color, depth, stencil);
   }

   /* If we are clearing after draws, split out a new subpass:
    */
   if (subpass->num_draws > 0) {
      /* If we won't be able to do any fast-clears, avoid pointlessly
       * splitting out a new subpass:
       */
      if (pfb->samples > 1 && !do_lrz_clear(ctx, buffers))
         return true;

      subpass = fd_batch_create_subpass(ctx->batch);

      /* If doing an LRZ clear, replace the existing LRZ buffer with a
       * freshly allocated one so that we have valid LRZ state for the
       * new pass.  Otherwise unconditional writes to the depth buffer
       * would cause LRZ state to be invalid.
       */
      if (do_lrz_clear(ctx, buffers)) {
         struct fd_resource *zsbuf = fd_resource(pfb->zsbuf->texture);

         fd_bo_del(subpass->lrz);
         subpass->lrz = fd_bo_new(ctx->screen->dev, fd_bo_size(zsbuf->lrz),
                                  FD_BO_NOMAP, "lrz");
         fd_bo_del(zsbuf->lrz);
         zsbuf->lrz = fd_bo_ref(subpass->lrz);
      }
   }

   if (do_lrz_clear(ctx, buffers)) {
      struct fd_resource *zsbuf = fd_resource(pfb->zsbuf->texture);

      zsbuf->lrz_valid = true;
      zsbuf->lrz_direction = FD_LRZ_UNKNOWN;
      subpass->clear_depth = depth;
      subpass->fast_cleared |= FD_BUFFER_LRZ;

      STATIC_ASSERT((FD_BUFFER_LRZ & FD_BUFFER_ALL) == 0);
   }

   /* We've already done the fallback 3d clear: */
   if (pfb->samples > 1)
      return true;

   u_foreach_bit (i, color_buffers)
      subpass->clear_color[i] = *color;
   if (buffers & FD_BUFFER_DEPTH)
      subpass->clear_depth = depth;
   if (buffers & FD_BUFFER_STENCIL)
      subpass->clear_stencil = stencil;

   subpass->fast_cleared |= buffers;

   return true;
}

template <chip CHIP>
void
fd6_draw_init(struct pipe_context *pctx)
   disable_thread_safety_analysis
{
   struct fd_context *ctx = fd_context(pctx);
   ctx->clear = fd6_clear;
   ctx->update_draw = fd6_update_draw<CHIP>;
   fd6_update_draw<CHIP>(ctx);
}

/* Teach the compiler about needed variants: */
template void fd6_draw_init<A6XX>(struct pipe_context *pctx);
template void fd6_draw_init<A7XX>(struct pipe_context *pctx);