/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "pipe/p_state.h"
#include "util/format/u_format.h"
#include "util/u_draw.h"
#include "util/u_helpers.h"
#include "util/u_memory.h"
#include "util/u_prim.h"
#include "util/u_string.h"

#include "freedreno_blitter.h"
#include "freedreno_context.h"
#include "freedreno_draw.h"
#include "freedreno_fence.h"
#include "freedreno_query_acc.h"
#include "freedreno_query_hw.h"
#include "freedreno_resource.h"
#include "freedreno_state.h"
#include "freedreno_util.h"

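/* Small wrappers around the batch resource-tracking helpers; the read/write
 * variants tolerate a NULL resource so callers don't need to check first.
 */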
static bool
batch_references_resource(struct fd_batch *batch, struct pipe_resource *prsc)
   assert_dt
{
   return fd_batch_references_resource(batch, fd_resource(prsc));
}

static void
resource_read(struct fd_batch *batch, struct pipe_resource *prsc) assert_dt
{
   if (!prsc)
      return;
   fd_batch_resource_read(batch, fd_resource(prsc));
}

static void
resource_written(struct fd_batch *batch, struct pipe_resource *prsc) assert_dt
{
   if (!prsc)
      return;
   fd_batch_resource_write(batch, fd_resource(prsc));
}

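/* Accumulate, from the dirty context state, which GMEM buffers this draw
 * touches (and so must be restored/resolved) and which resources it reads
 * or writes, so batch dependencies are tracked correctly.
 */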
static void
batch_draw_tracking_for_dirty_bits(struct fd_batch *batch) assert_dt
{
   struct fd_context *ctx = batch->ctx;
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;
   enum fd_dirty_3d_state dirty = ctx->dirty_resource;
   unsigned buffers = 0, restore_buffers = 0;

   if (dirty & (FD_DIRTY_FRAMEBUFFER | FD_DIRTY_ZSA)) {
      if (fd_depth_enabled(ctx)) {
         if (fd_resource(pfb->zsbuf->texture)->valid) {
            restore_buffers |= FD_BUFFER_DEPTH;
            /* storing packed d/s depth also stores stencil, so we need
             * the stencil restored too to avoid invalidating it.
             */
            if (pfb->zsbuf->texture->format == PIPE_FORMAT_Z24_UNORM_S8_UINT)
               restore_buffers |= FD_BUFFER_STENCIL;
         } else {
            batch->invalidated |= FD_BUFFER_DEPTH;
         }
         batch->gmem_reason |= FD_GMEM_DEPTH_ENABLED;
         if (fd_depth_write_enabled(ctx)) {
            buffers |= FD_BUFFER_DEPTH;
            resource_written(batch, pfb->zsbuf->texture);
         } else {
            resource_read(batch, pfb->zsbuf->texture);
         }
      }

      if (fd_stencil_enabled(ctx)) {
         if (fd_resource(pfb->zsbuf->texture)->valid) {
            restore_buffers |= FD_BUFFER_STENCIL;
            /* storing packed d/s stencil also stores depth, so we need
             * the depth restored too to avoid invalidating it.
             */
            if (pfb->zsbuf->texture->format == PIPE_FORMAT_Z24_UNORM_S8_UINT)
               restore_buffers |= FD_BUFFER_DEPTH;
         } else {
            batch->invalidated |= FD_BUFFER_STENCIL;
         }
         batch->gmem_reason |= FD_GMEM_STENCIL_ENABLED;
         buffers |= FD_BUFFER_STENCIL;
         resource_written(batch, pfb->zsbuf->texture);
      }
   }

   if (dirty & FD_DIRTY_FRAMEBUFFER) {
      for (unsigned i = 0; i < pfb->nr_cbufs; i++) {
         struct pipe_resource *surf;

         if (!pfb->cbufs[i])
            continue;

         surf = pfb->cbufs[i]->texture;

         if (fd_resource(surf)->valid) {
            restore_buffers |= PIPE_CLEAR_COLOR0 << i;
         } else {
            batch->invalidated |= PIPE_CLEAR_COLOR0 << i;
         }

         buffers |= PIPE_CLEAR_COLOR0 << i;

         resource_written(batch, pfb->cbufs[i]->texture);
      }
   }

   if (dirty & (FD_DIRTY_CONST | FD_DIRTY_TEX | FD_DIRTY_SSBO | FD_DIRTY_IMAGE)) {
      u_foreach_bit (s, ctx->bound_shader_stages) {
         enum fd_dirty_shader_state dirty_shader = ctx->dirty_shader_resource[s];

         /* Mark constbuf as being read: */
         if (dirty_shader & FD_DIRTY_SHADER_CONST) {
            u_foreach_bit (i, ctx->constbuf[s].enabled_mask)
               resource_read(batch, ctx->constbuf[s].cb[i].buffer);
         }

         /* Mark textures as being read */
         if (dirty_shader & FD_DIRTY_SHADER_TEX) {
            u_foreach_bit (i, ctx->tex[s].valid_textures)
               resource_read(batch, ctx->tex[s].textures[i]->texture);
         }

         /* Mark SSBOs as being read or written: */
         if (dirty_shader & FD_DIRTY_SHADER_SSBO) {
            const struct fd_shaderbuf_stateobj *so = &ctx->shaderbuf[s];

            u_foreach_bit (i, so->enabled_mask & so->writable_mask)
               resource_written(batch, so->sb[i].buffer);

            u_foreach_bit (i, so->enabled_mask & ~so->writable_mask)
               resource_read(batch, so->sb[i].buffer);
         }

         /* Mark Images as being read or written: */
         if (dirty_shader & FD_DIRTY_SHADER_IMAGE) {
            u_foreach_bit (i, ctx->shaderimg[s].enabled_mask) {
               struct pipe_image_view *img = &ctx->shaderimg[s].si[i];
               if (img->access & PIPE_IMAGE_ACCESS_WRITE)
                  resource_written(batch, img->resource);
               else
                  resource_read(batch, img->resource);
            }
         }
      }
   }

   /* Mark VBOs as being read */
   if (dirty & FD_DIRTY_VTXBUF) {
      u_foreach_bit (i, ctx->vtx.vertexbuf.enabled_mask) {
         assert(!ctx->vtx.vertexbuf.vb[i].is_user_buffer);
         resource_read(batch, ctx->vtx.vertexbuf.vb[i].buffer.resource);
      }
   }

   /* Mark streamout buffers as being written.. */
   if (dirty & FD_DIRTY_STREAMOUT) {
      for (unsigned i = 0; i < ctx->streamout.num_targets; i++) {
         struct fd_stream_output_target *target =
            fd_stream_output_target(ctx->streamout.targets[i]);

         if (target) {
            resource_written(batch, target->base.buffer);
            resource_written(batch, target->offset_buf);
         }
      }
   }

   if (dirty & FD_DIRTY_QUERY) {
      list_for_each_entry (struct fd_acc_query, aq, &ctx->acc_active_queries, node) {
         resource_written(batch, aq->prsc);
      }
   }

   /* any buffers that haven't been cleared yet, we need to restore: */
   batch->restore |= restore_buffers & (FD_BUFFER_ALL & ~batch->invalidated);
   /* and any buffers used, need to be resolved: */
   batch->resolve |= buffers;
}

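/* Quick check for whether the (more expensive, screen-lock-taking) per-draw
 * resource tracking in batch_draw_tracking() can be skipped entirely.
 */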
static bool
needs_draw_tracking(struct fd_batch *batch, const struct pipe_draw_info *info,
                    const struct pipe_draw_indirect_info *indirect)
   assert_dt
{
   struct fd_context *ctx = batch->ctx;

   if (ctx->dirty_resource)
      return true;

   if (info->index_size && !batch_references_resource(batch, info->index.resource))
      return true;

   if (indirect) {
      if (indirect->buffer && !batch_references_resource(batch, indirect->buffer))
         return true;
      if (indirect->indirect_draw_count &&
          !batch_references_resource(batch, indirect->indirect_draw_count))
         return true;
      if (indirect->count_from_stream_output)
         return true;
   }

   return false;
}

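/* Per-draw resource tracking: record everything this draw reads or writes so
 * that batch dependencies (and any resulting flushes) are handled before the
 * draw is emitted.
 */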
static void
batch_draw_tracking(struct fd_batch *batch, const struct pipe_draw_info *info,
                    const struct pipe_draw_indirect_info *indirect) assert_dt
{
   struct fd_context *ctx = batch->ctx;

   if (!needs_draw_tracking(batch, info, indirect))
      goto out;

   /*
    * Figure out the buffers/features we need:
    */

   fd_screen_lock(ctx->screen);

   if (ctx->dirty_resource)
      batch_draw_tracking_for_dirty_bits(batch);

   /* Mark index buffer as being read */
   if (info->index_size)
      resource_read(batch, info->index.resource);

   /* Mark indirect draw buffer as being read */
   if (indirect) {
      resource_read(batch, indirect->buffer);
      resource_read(batch, indirect->indirect_draw_count);
      if (indirect->count_from_stream_output)
         resource_read(
            batch, fd_stream_output_target(indirect->count_from_stream_output)
                      ->offset_buf);
   }

   resource_written(batch, batch->query_buf);

   fd_screen_unlock(ctx->screen);

out:
   fd_batch_update_queries(batch);
}

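/* Software bookkeeping of draw-call and primitive counts, used when there
 * are active pipeline-statistics consumers (see ctx->stats_users).
 */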
static void
update_draw_stats(struct fd_context *ctx, const struct pipe_draw_info *info,
                  const struct pipe_draw_start_count_bias *draws,
                  unsigned num_draws) assert_dt
{
   ctx->stats.draw_calls++;

   if (ctx->screen->gen < 6) {
      /* Counting prims in sw doesn't work for GS and tessellation.  For older
       * gens we don't have those stages and don't have the hw counters enabled,
       * so keep the count accurate for non-patch geometry.
       */
      unsigned prims = 0;
      if ((info->mode != MESA_PRIM_PATCHES) && (info->mode != MESA_PRIM_COUNT)) {
         for (unsigned i = 0; i < num_draws; i++) {
            prims += u_reduced_prims_for_vertices(info->mode, draws[i].count);
         }
      }

      ctx->stats.prims_generated += prims;

      if (ctx->streamout.num_targets > 0) {
         /* Clip the prims we're writing to the size of the SO buffers. */
         enum mesa_prim tf_prim = u_decomposed_prim(info->mode);
         unsigned verts_written = u_vertices_for_prims(tf_prim, prims);
         unsigned remaining_vert_space =
            ctx->streamout.max_tf_vtx - ctx->streamout.verts_written;
         if (verts_written > remaining_vert_space) {
            verts_written = remaining_vert_space;
            u_trim_pipe_prim(tf_prim, &remaining_vert_space);
         }
         ctx->streamout.verts_written += verts_written;

         ctx->stats.prims_emitted +=
            u_reduced_prims_for_vertices(tf_prim, verts_written);
      }
   }
}

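/* The pipe_context::draw_vbo() entry point: handles index-buffer upload,
 * multi-draw fallbacks, and batch resource tracking, then hands off to the
 * per-gen backend via ctx->draw_vbos().
 */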
static void
fd_draw_vbo(struct pipe_context *pctx, const struct pipe_draw_info *info,
            unsigned drawid_offset,
            const struct pipe_draw_indirect_info *indirect,
            const struct pipe_draw_start_count_bias *draws, unsigned num_draws) in_dt
{
   struct fd_context *ctx = fd_context(pctx);

   /* for debugging problems with indirect draw, it is convenient to be able
    * to emulate it, to determine if the game is feeding us bogus data:
    */
   if (indirect && indirect->buffer && FD_DBG(NOINDR)) {
      /* num_draws is only applicable for direct draws: */
      assert(num_draws == 1);
      util_draw_indirect(pctx, info, indirect);
      return;
   }

   /* TODO: push down the region versions into the tiles */
   if (!fd_render_condition_check(pctx))
      return;

   /* Upload a user index buffer. */
   struct pipe_resource *indexbuf = NULL;
   unsigned index_offset = 0;
   struct pipe_draw_info new_info;
   if (info->index_size) {
      if (info->has_user_indices) {
         if (num_draws > 1) {
            util_draw_multi(pctx, info, drawid_offset, indirect, draws, num_draws);
            return;
         }
         if (!util_upload_index_buffer(pctx, info, &draws[0], &indexbuf,
                                       &index_offset, 4))
            return;
         new_info = *info;
         new_info.index.resource = indexbuf;
         new_info.has_user_indices = false;
         info = &new_info;
      } else {
         indexbuf = info->index.resource;
      }
   }

   if ((ctx->streamout.num_targets > 0) && (num_draws > 1)) {
      util_draw_multi(pctx, info, drawid_offset, indirect, draws, num_draws);
      return;
   }

   struct fd_batch *batch = fd_context_batch(ctx);

   batch_draw_tracking(batch, info, indirect);

   while (unlikely(batch->flushed)) {
      /* The current batch was flushed in batch_draw_tracking()
       * so start anew.  We know this won't happen a second time
       * since we are dealing with a fresh batch:
       */
      fd_batch_reference(&batch, NULL);
      batch = fd_context_batch(ctx);
      batch_draw_tracking(batch, info, indirect);
      assert(ctx->batch == batch);
   }

   batch->num_draws++;
   batch->subpass->num_draws++;

   fd_print_dirty_state(ctx->dirty);

   /* Marking the batch as needing flush must come after the batch
    * dependency tracking (resource_read()/resource_written()), as that
    * can trigger a flush
    */
   fd_batch_needs_flush(batch);

   struct pipe_framebuffer_state *pfb = &batch->framebuffer;
   DBG("%p: %ux%u num_draws=%u (%s/%s)", batch, pfb->width, pfb->height,
       batch->num_draws,
       util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
       util_format_short_name(pipe_surface_format(pfb->zsbuf)));

   batch->cost += ctx->draw_cost;

   ctx->draw_vbos(ctx, info, drawid_offset, indirect, draws, num_draws, index_offset);

   if (unlikely(ctx->stats_users > 0))
      update_draw_stats(ctx, info, draws, num_draws);

   for (unsigned i = 0; i < ctx->streamout.num_targets; i++) {
      assert(num_draws == 1);
      ctx->streamout.offsets[i] += draws[0].count;
   }

   assert(!batch->flushed);

   fd_batch_check_size(batch);
   fd_batch_reference(&batch, NULL);

   if (info == &new_info)
      pipe_resource_reference(&indexbuf, NULL);
}

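/* Debug variant of draw_vbo(), installed when FD_DBG(DDRAW) or FD_DBG(FLUSH)
 * is set, to dirty all state and/or flush after every draw.
 */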
static void
fd_draw_vbo_dbg(struct pipe_context *pctx, const struct pipe_draw_info *info,
                unsigned drawid_offset,
                const struct pipe_draw_indirect_info *indirect,
                const struct pipe_draw_start_count_bias *draws, unsigned num_draws)
   in_dt
{
   fd_draw_vbo(pctx, info, drawid_offset, indirect, draws, num_draws);

   if (FD_DBG(DDRAW))
      fd_context_all_dirty(fd_context(pctx));

   if (FD_DBG(FLUSH))
      pctx->flush(pctx, NULL, 0);
}

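/* Resource tracking and GMEM bookkeeping for a full-surface clear. */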
static void
batch_clear_tracking(struct fd_batch *batch, unsigned buffers) assert_dt
{
   struct fd_context *ctx = batch->ctx;
   struct pipe_framebuffer_state *pfb = &batch->framebuffer;
   unsigned cleared_buffers;

   /* pctx->clear() is only for full-surface clears, so scissor is
    * equivalent to having GL_SCISSOR_TEST disabled:
    */
   batch->max_scissor.minx = 0;
   batch->max_scissor.miny = 0;
   batch->max_scissor.maxx = pfb->width - 1;
   batch->max_scissor.maxy = pfb->height - 1;

   /* for bookkeeping about which buffers have been cleared (and thus
    * can fully or partially skip mem2gmem) we need to ignore buffers
    * that have already had a draw, in case apps do silly things like
    * clear after draw (ie. if you only clear the color buffer, but
    * something like alpha-test causes side effects from the draw in
    * the depth buffer, etc)
    */
   cleared_buffers = buffers & (FD_BUFFER_ALL & ~batch->restore);
   batch->cleared |= buffers;
   batch->invalidated |= cleared_buffers;

   batch->resolve |= buffers;

   fd_screen_lock(ctx->screen);

   if (buffers & PIPE_CLEAR_COLOR)
      for (unsigned i = 0; i < pfb->nr_cbufs; i++)
         if (buffers & (PIPE_CLEAR_COLOR0 << i))
            resource_written(batch, pfb->cbufs[i]->texture);

   if (buffers & (PIPE_CLEAR_DEPTH | PIPE_CLEAR_STENCIL)) {
      resource_written(batch, pfb->zsbuf->texture);
      batch->gmem_reason |= FD_GMEM_CLEARS_DEPTH_STENCIL;
   }

   resource_written(batch, batch->query_buf);

   list_for_each_entry (struct fd_acc_query, aq, &ctx->acc_active_queries, node)
      resource_written(batch, aq->prsc);

   fd_screen_unlock(ctx->screen);
}

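/* The pipe_context::clear() entry point: tries the per-gen fast-clear hook
 * first and falls back to the blitter if it isn't implemented (or declines).
 */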
static void
fd_clear(struct pipe_context *pctx, unsigned buffers,
         const struct pipe_scissor_state *scissor_state,
         const union pipe_color_union *color, double depth,
         unsigned stencil) in_dt
{
   struct fd_context *ctx = fd_context(pctx);

   /* TODO: push down the region versions into the tiles */
   if (!fd_render_condition_check(pctx))
      return;

   struct fd_batch *batch = fd_context_batch(ctx);

   batch_clear_tracking(batch, buffers);

   while (unlikely(batch->flushed)) {
      /* The current batch was flushed in batch_clear_tracking()
       * so start anew.  We know this won't happen a second time
       * since we are dealing with a fresh batch:
       */
      fd_batch_reference(&batch, NULL);
      batch = fd_context_batch(ctx);
      batch_clear_tracking(batch, buffers);
      assert(ctx->batch == batch);
   }

   /* Marking the batch as needing flush must come after the batch
    * dependency tracking (resource_read()/resource_written()), as that
    * can trigger a flush
    */
   fd_batch_needs_flush(batch);

   struct pipe_framebuffer_state *pfb = &batch->framebuffer;
   DBG("%p: %x %ux%u depth=%f, stencil=%u (%s/%s)", batch, buffers, pfb->width,
       pfb->height, depth, stencil,
       util_format_short_name(pipe_surface_format(pfb->cbufs[0])),
       util_format_short_name(pipe_surface_format(pfb->zsbuf)));

   /* if the per-gen backend doesn't implement ctx->clear(), fall back to
    * the generic blitter clear:
    */
   bool fallback = true;

   if (ctx->clear) {
      fd_batch_update_queries(batch);

      if (ctx->clear(ctx, buffers, color, depth, stencil)) {
         if (FD_DBG(DCLEAR))
            fd_context_all_dirty(ctx);

         fallback = false;
      }
   }

   assert(!batch->flushed);

   if (fallback) {
      fd_blitter_clear(pctx, buffers, color, depth, stencil);
   }

   fd_batch_check_size(batch);

   fd_batch_reference(&batch, NULL);
}

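/* Partial (scissored) color clear, handled via the blitter helpers. */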
static void
fd_clear_render_target(struct pipe_context *pctx, struct pipe_surface *ps,
                       const union pipe_color_union *color, unsigned x,
                       unsigned y, unsigned w, unsigned h,
                       bool render_condition_enabled) in_dt
{
   if (render_condition_enabled && !fd_render_condition_check(pctx))
      return;

   fd_blitter_clear_render_target(pctx, ps, color, x, y, w, h,
                                  render_condition_enabled);
}

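/* Partial (scissored) depth/stencil clear, handled via the blitter helpers. */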
static void
fd_clear_depth_stencil(struct pipe_context *pctx, struct pipe_surface *ps,
                       unsigned buffers, double depth, unsigned stencil,
                       unsigned x, unsigned y, unsigned w, unsigned h,
                       bool render_condition_enabled) in_dt
{
   if (render_condition_enabled && !fd_render_condition_check(pctx))
      return;

   fd_blitter_clear_depth_stencil(pctx, ps, buffers,
                                  depth, stencil, x, y, w, h,
                                  render_condition_enabled);
}

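/* The pipe_context::launch_grid() entry point: compute work goes into its
 * own non-draw batch, with the current draw batch saved and restored around
 * the dispatch.
 */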
static void
fd_launch_grid(struct pipe_context *pctx,
               const struct pipe_grid_info *info) in_dt
{
   struct fd_context *ctx = fd_context(pctx);
   const struct fd_shaderbuf_stateobj *so =
      &ctx->shaderbuf[PIPE_SHADER_COMPUTE];
   struct fd_batch *batch, *save_batch = NULL;

   if (!fd_render_condition_check(pctx))
      return;

   batch = fd_context_batch_nondraw(ctx);
   fd_batch_reference(&save_batch, ctx->batch);
   fd_batch_reference(&ctx->batch, batch);

   fd_screen_lock(ctx->screen);

   /* Mark SSBOs */
   u_foreach_bit (i, so->enabled_mask & so->writable_mask)
      resource_written(batch, so->sb[i].buffer);

   u_foreach_bit (i, so->enabled_mask & ~so->writable_mask)
      resource_read(batch, so->sb[i].buffer);

   u_foreach_bit (i, ctx->shaderimg[PIPE_SHADER_COMPUTE].enabled_mask) {
      struct pipe_image_view *img = &ctx->shaderimg[PIPE_SHADER_COMPUTE].si[i];
      if (img->access & PIPE_IMAGE_ACCESS_WRITE)
         resource_written(batch, img->resource);
      else
         resource_read(batch, img->resource);
   }

   /* UBOs are read */
   u_foreach_bit (i, ctx->constbuf[PIPE_SHADER_COMPUTE].enabled_mask)
      resource_read(batch, ctx->constbuf[PIPE_SHADER_COMPUTE].cb[i].buffer);

   /* Mark textures as being read */
   u_foreach_bit (i, ctx->tex[PIPE_SHADER_COMPUTE].valid_textures)
      resource_read(batch, ctx->tex[PIPE_SHADER_COMPUTE].textures[i]->texture);

   /* For global buffers, we don't really know if read or written, so assume
    * the worst:
    */
   u_foreach_bit (i, ctx->global_bindings.enabled_mask)
      resource_written(batch, ctx->global_bindings.buf[i]);

   if (info->indirect)
      resource_read(batch, info->indirect);

   list_for_each_entry (struct fd_acc_query, aq, &ctx->acc_active_queries, node) {
      resource_written(batch, aq->prsc);
   }

   /* If the saved batch has been flushed during the resource tracking,
    * don't re-install it:
    */
   if (save_batch && save_batch->flushed)
      fd_batch_reference_locked(&save_batch, NULL);

   fd_screen_unlock(ctx->screen);

   fd_batch_update_queries(batch);

   DBG("%p: work_dim=%u, block=%ux%ux%u, grid=%ux%ux%u",
       batch, info->work_dim,
       info->block[0], info->block[1], info->block[2],
       info->grid[0], info->grid[1], info->grid[2]);

   fd_batch_needs_flush(batch);
   ctx->launch_grid(ctx, info);

   fd_batch_reference(&ctx->batch, save_batch);
   fd_batch_reference(&save_batch, NULL);
   fd_batch_reference(&batch, NULL);
}

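/* Hook up the draw/clear/compute entry points at context creation. */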
void
fd_draw_init(struct pipe_context *pctx)
{
   if (FD_DBG(DDRAW) || FD_DBG(FLUSH)) {
      pctx->draw_vbo = fd_draw_vbo_dbg;
   } else {
      pctx->draw_vbo = fd_draw_vbo;
   }

   pctx->clear = fd_clear;
   pctx->clear_render_target = fd_clear_render_target;
   pctx->clear_depth_stencil = fd_clear_depth_stencil;

   if (has_compute(fd_screen(pctx->screen))) {
      pctx->launch_grid = fd_launch_grid;
   }
}