
Lines Matching refs:batch (only lines that reference the symbol batch are shown; `...` marks elided code)

batch_init(struct fd_batch *batch)
{
    struct fd_context *ctx = batch->ctx;
    ...
    util_queue_fence_init(&batch->flush_fence);
    ...
    batch->draw = fd_ringbuffer_new(ctx->pipe, size);
    if (!batch->nondraw) {
        batch->binning = fd_ringbuffer_new(ctx->pipe, size);
        batch->gmem = fd_ringbuffer_new(ctx->pipe, size);

        fd_ringbuffer_set_parent(batch->gmem, NULL);
        fd_ringbuffer_set_parent(batch->draw, batch->gmem);
        fd_ringbuffer_set_parent(batch->binning, batch->gmem);
    } else {
        fd_ringbuffer_set_parent(batch->draw, NULL);
    }

    batch->in_fence_fd = -1;
    batch->fence = fd_fence_create(batch);

    batch->cleared = batch->partial_cleared = 0;
    batch->restore = batch->resolve = 0;
    batch->needs_flush = false;
    batch->gmem_reason = 0;
    batch->num_draws = 0;
    batch->stage = FD_STAGE_NULL;

    fd_reset_wfi(batch);

    /* reset maximal bounds: */
    batch->max_scissor.minx = batch->max_scissor.miny = ~0;
    batch->max_scissor.maxx = batch->max_scissor.maxy = 0;

    util_dynarray_init(&batch->draw_patches, NULL);
    ...
    util_dynarray_init(&batch->rbrc_patches, NULL);

    assert(batch->resources->entries == 0);

    util_dynarray_init(&batch->samples, NULL);
}

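A note on the ring setup above; the parenting semantics are my reading (they live in libdrm_freedreno, not in this listing):

    /* A draw batch gets three rings: gmem is the top-level ring that is
     * ultimately submitted, while draw and binning are children whose
     * commands the tiling code replays from the gmem ring, per tile. A
     * nondraw batch needs only a standalone draw ring. `size` is
     * computed in the elided lines above.
     */
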
fd_batch_create(struct fd_context *ctx, bool nondraw)
{
    struct fd_batch *batch = CALLOC_STRUCT(fd_batch);

    if (!batch)
        return NULL;

    DBG("%p", batch);

    pipe_reference_init(&batch->reference, 1);
    batch->ctx = ctx;
    batch->nondraw = nondraw;

    batch->resources = _mesa_set_create(NULL, _mesa_hash_pointer,
            _mesa_key_pointer_equal);

    batch_init(batch);

    return batch;
}

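The parameter list above is reconstructed from the assignments to batch->ctx and batch->nondraw; the match listing omits it. fd_batch is reference counted: pipe_reference_init() starts the count at one, and __fd_batch_destroy() (further down) runs when the last reference is dropped. A minimal sketch of that pattern with illustrative names (the real pipe_reference is atomic; this one is not):

    #include <stdlib.h>

    struct obj {
        int refcount;                /* plays the role of batch->reference */
    };

    static struct obj *obj_create(void)
    {
        struct obj *o = calloc(1, sizeof(*o));
        if (!o)
            return NULL;
        o->refcount = 1;             /* creator holds the first reference */
        return o;
    }

    static void obj_unref(struct obj *o)
    {
        if (o && --o->refcount == 0) /* last reference dropped */
            free(o);                 /* cf. __fd_batch_destroy() */
    }
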
batch_fini(struct fd_batch *batch)
{
    pipe_resource_reference(&batch->query_buf, NULL);

    if (batch->in_fence_fd != -1)
        close(batch->in_fence_fd);

    /* in case the batch was never flushed: */
    fd_fence_populate(batch->fence, 0, -1);

    fd_fence_ref(NULL, &batch->fence, NULL);

    fd_ringbuffer_del(batch->draw);
    if (!batch->nondraw) {
        fd_ringbuffer_del(batch->binning);
        fd_ringbuffer_del(batch->gmem);
    } else {
        debug_assert(!batch->binning);
        debug_assert(!batch->gmem);
    }

    if (batch->lrz_clear) {
        fd_ringbuffer_del(batch->lrz_clear);
        batch->lrz_clear = NULL;
    }

    util_dynarray_fini(&batch->draw_patches);

    if (is_a3xx(batch->ctx->screen))
        util_dynarray_fini(&batch->rbrc_patches);

    while (batch->samples.size > 0) {
        struct fd_hw_sample *samp =
            util_dynarray_pop(&batch->samples, struct fd_hw_sample *);
        fd_hw_sample_reference(batch->ctx, &samp, NULL);
    }
    util_dynarray_fini(&batch->samples);

    if (batch->ctx->screen->reorder)
        util_queue_fence_destroy(&batch->flush_fence);
}

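How the teardown above pairs up with batch_init(), as I read it:

    /* batch_fini() releases what batch_init() created: the rings, the
     * fence (populated first, so anything waiting on a never-flushed
     * batch is unblocked), the patch/sample dynarrays and, on
     * reordering screens, the flush_fence. in_fence_fd is closed only
     * if a fence fd was actually set (-1 is the sentinel from
     * batch_init()).
     */
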
batch_flush_reset_dependencies(struct fd_batch *batch, bool flush)
{
    struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
    struct fd_batch *dep;

    foreach_batch(dep, cache, batch->dependents_mask) {
        ...
    }

    batch->dependents_mask = 0;
}

batch_reset_resources_locked(struct fd_batch *batch)
{
    struct set_entry *entry;
    pipe_mutex_assert_locked(batch->ctx->screen->lock);

    set_foreach(batch->resources, entry) {
        struct fd_resource *rsc = (struct fd_resource *)entry->key;
        _mesa_set_remove(batch->resources, entry);
        debug_assert(rsc->batch_mask & (1 << batch->idx));
        rsc->batch_mask &= ~(1 << batch->idx);
        if (rsc->write_batch == batch)
            fd_batch_reference_locked(&rsc->write_batch, NULL);
    }
}

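Each resource carries a bitmask over batch indices recording which batches use it; the loop above clears this batch's bit. A self-contained sketch of that bookkeeping, with illustrative names rather than the driver's:

    #include <assert.h>
    #include <stdint.h>

    struct res {
        uint32_t batch_mask;    /* bit i set => batch with idx i uses this */
    };

    static void res_track(struct res *r, unsigned batch_idx)
    {
        assert(batch_idx < 32); /* mask width bounds the live batches */
        r->batch_mask |= (1u << batch_idx);
    }

    static void res_untrack(struct res *r, unsigned batch_idx)
    {
        r->batch_mask &= ~(1u << batch_idx);
    }

Presumably the width of this mask is also what bounds how many batches the batch cache keeps alive at once.
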
batch_reset_resources(struct fd_batch *batch)
{
    mtx_lock(&batch->ctx->screen->lock);
    batch_reset_resources_locked(batch);
    mtx_unlock(&batch->ctx->screen->lock);
}

batch_reset(struct fd_batch *batch)
{
    DBG("%p", batch);

    fd_batch_sync(batch);

    batch_flush_reset_dependencies(batch, false);
    batch_reset_resources(batch);

    batch_fini(batch);
    batch_init(batch);
}

fd_batch_reset(struct fd_batch *batch)
{
    if (batch->needs_flush)
        batch_reset(batch);
}

__fd_batch_destroy(struct fd_batch *batch)
{
    DBG("%p", batch);

    util_copy_framebuffer_state(&batch->framebuffer, NULL);

    mtx_lock(&batch->ctx->screen->lock);
    fd_bc_invalidate_batch(batch, true);
    mtx_unlock(&batch->ctx->screen->lock);

    batch_fini(batch);

    batch_reset_resources(batch);
    debug_assert(batch->resources->entries == 0);
    _mesa_set_destroy(batch->resources, NULL);

    batch_flush_reset_dependencies(batch, false);
    debug_assert(batch->dependents_mask == 0);

    free(batch);
}

__fd_batch_describe(char* buf, const struct fd_batch *batch)
{
    util_sprintf(buf, "fd_batch<%u>", batch->seqno);
}

fd_batch_sync(struct fd_batch *batch)
{
    if (!batch->ctx->screen->reorder)
        return;
    util_queue_fence_wait(&batch->flush_fence);
}

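What the wait means in practice (my reading):

    /* fd_batch_sync() returns immediately unless flushes are deferred
     * to the flush_queue worker (screen->reorder), in which case it
     * blocks on flush_fence until the queued batch_flush_func job
     * below has run.
     */
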
batch_flush_func(void *job, int id)
{
    struct fd_batch *batch = job;

    fd_gmem_render_tiles(batch);
    batch_reset_resources(batch);
}

batch_cleanup_func(void *job, int id)
{
    struct fd_batch *batch = job;
    fd_batch_reference(&batch, NULL);
}

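The (void *job, int id) signatures are reconstructed from the util_queue job convention; the listing shows only the bodies. Why there are two callbacks:

    /* batch_flush() (below) takes an extra reference before queueing
     * the batch: batch_flush_func does the actual GMEM rendering on
     * the worker thread, then batch_cleanup_func drops the queue's
     * reference, which may destroy the batch.
     */
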
batch_flush(struct fd_batch *batch, bool force)
{
    DBG("%p: needs_flush=%d", batch, batch->needs_flush);

    if (!batch->needs_flush) {
        ...    /* nothing to do, unless a submit is forced: */
        fd_gmem_render_noop(batch);
        ...
    }

    batch->needs_flush = false;

    /* close out draw cmds, pausing any active queries: */
    fd_batch_set_stage(batch, FD_STAGE_NULL);

    fd_context_all_dirty(batch->ctx);
    batch_flush_reset_dependencies(batch, true);

    if (batch->ctx->screen->reorder) {
        struct fd_batch *tmp = NULL;
        fd_batch_reference(&tmp, batch);

        if (!util_queue_is_initialized(&batch->ctx->flush_queue))
            util_queue_init(&batch->ctx->flush_queue, "flush_queue", 16, 1, 0);

        util_queue_add_job(&batch->ctx->flush_queue,
                batch, &batch->flush_fence,
                batch_flush_func, batch_cleanup_func);
    } else {
        fd_gmem_render_tiles(batch);
        batch_reset_resources(batch);
    }

    debug_assert(batch->reference.count > 0);
    ...
    if (batch == batch->ctx->batch) {
        batch_reset(batch);
    } else {
        mtx_lock(&batch->ctx->screen->lock);
        fd_bc_invalidate_batch(batch, false);
        mtx_unlock(&batch->ctx->screen->lock);
    }
}

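The two flush paths, as I read them:

    /* With screen->reorder, rendering is handed to a single-threaded
     * flush_queue (created lazily on first use); the reference taken
     * via fd_batch_reference(&tmp, batch) keeps the batch alive until
     * batch_cleanup_func releases it. Without reorder, GMEM rendering
     * and resource cleanup happen synchronously right here. Afterwards
     * the context's current batch is recycled in place (batch_reset),
     * while any other batch is dropped from the batch cache.
     */
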
fd_batch_flush(struct fd_batch *batch, bool sync, bool force)
{
    struct fd_batch *tmp = NULL;
    fd_batch_reference(&tmp, batch);    /* extra ref held across the flush */
    ...
}

batch_depends_on(struct fd_batch *batch, struct fd_batch *other)
{
    struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
    struct fd_batch *dep;

    if (batch->dependents_mask & (1 << other->idx))
        return true;

    foreach_batch(dep, cache, batch->dependents_mask)
        if (batch_depends_on(batch, dep))
            return true;

    return false;
}

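The intent here is a transitive walk over dependents_mask. As shown, though, the recursion asks whether batch depends on its own direct dependent dep, which the first check answers "yes" by construction, so any batch that has dependents at all conservatively reports a dependency (extra flushes, never missed ones); a strict transitive check would recurse as batch_depends_on(dep, other). A self-contained sketch of the strict version over plain bitmasks, with illustrative names:

    #include <stdbool.h>
    #include <stdint.h>

    #define MAX_BATCHES 32

    /* deps[i] = bitmask of the batches that batch i directly depends
     * on (a stand-in for dependents_mask). Assumes an acyclic graph,
     * which fd_batch_add_dep() below enforces by flushing.
     */
    static uint32_t deps[MAX_BATCHES];

    static bool depends_on(unsigned a, unsigned b)
    {
        if (deps[a] & (1u << b))
            return true;        /* direct dependency */
        for (unsigned i = 0; i < MAX_BATCHES; i++)
            if ((deps[a] & (1u << i)) && depends_on(i, b))
                return true;    /* indirect, via batch i */
        return false;
    }
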
fd_batch_add_dep(struct fd_batch *batch, struct fd_batch *dep)
{
    if (batch->dependents_mask & (1 << dep->idx))
        return;

    /* if dep already depends on us, flush it to avoid a cycle: */
    if (batch_depends_on(dep, batch)) {
        DBG("%p: flush forced on %p!", batch, dep);
        mtx_unlock(&batch->ctx->screen->lock);
        fd_batch_flush(dep, false, false);
        mtx_lock(&batch->ctx->screen->lock);
    } else {
        ...
        batch->dependents_mask |= (1 << dep->idx);
        DBG("%p: added dependency on %p", batch, dep);
    }
}

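One detail worth calling out; note that the flush call between the unlock/lock pair above is reconstructed from the "flush forced" message:

    /* The lock dance: callers hold screen->lock (fd_batch_resource_used()
     * below asserts it), but flushing must happen without it, so the
     * lock is dropped around the forced flush and re-taken afterwards.
     */
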
fd_batch_resource_used(struct fd_batch *batch, struct fd_resource *rsc, bool write)
{
    pipe_mutex_assert_locked(batch->ctx->screen->lock);

    if (rsc->stencil)
        fd_batch_resource_used(batch, rsc->stencil, write);

    DBG("%p: %s %p", batch, write ? "write" : "read", rsc);
    ...
    /* write: every other batch already using rsc must become a
     * dependency (or be flushed):
     */
    if (rsc->batch_mask != (1 << batch->idx)) {
        struct fd_batch_cache *cache = &batch->ctx->screen->batch_cache;
        ...
        if (dep == batch)
            continue;
        ...
        fd_batch_add_dep(batch, b);
        ...
    }
    fd_batch_reference_locked(&rsc->write_batch, batch);
    ...
    /* read: only the current writer needs to become a dependency: */
    fd_batch_add_dep(batch, rsc->write_batch);
    ...
    if (rsc->batch_mask & (1 << batch->idx))
        return;

    debug_assert(!_mesa_set_search(batch->resources, rsc));

    _mesa_set_add(batch->resources, rsc);
    rsc->batch_mask |= (1 << batch->idx);
}

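The read/write hazard rules being implemented, in my gloss:

    /* write: all other batches reading or writing rsc become
     *        dependencies (or get flushed), then this batch becomes
     *        rsc->write_batch.
     * read:  only rsc->write_batch needs to become a dependency;
     *        concurrent readers coexist.
     * Either way rsc is added to batch->resources exactly once, and
     * the batch's bit is set in rsc->batch_mask.
     */
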
fd_batch_check_size(struct fd_batch *batch)
{
    if (fd_device_version(batch->ctx->screen->dev) >= FD_VERSION_UNLIMITED_CMDS)
        return;

    struct fd_ringbuffer *ring = batch->draw;
    ...
    fd_batch_flush(batch, true, false);
}

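Why this check exists, inferred from the version test; the elided condition presumably compares the draw ring's fill level against its size:

    /* On kernels without FD_VERSION_UNLIMITED_CMDS the ring cannot
     * grow, so the batch is flushed (sync=true) before the draw ring
     * fills up.
     */
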
fd_wfi(struct fd_batch *batch, struct fd_ringbuffer *ring)
{
    if (batch->needs_wfi) {
        if (batch->ctx->screen->gpu_id >= 500)
            OUT_WFI5(ring);
        else
            OUT_WFI(ring);
        batch->needs_wfi = false;
    }
}

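The pattern here, with the OUT_WFI/OUT_WFI5 branch reconstructed from the gpu_id >= 500 test:

    /* needs_wfi coalesces WAIT_FOR_IDLE commands: fd_reset_wfi() (see
     * batch_init() above) arms the flag, the first fd_wfi() afterwards
     * emits a single wait-for-idle into the ring and disarms it, and
     * further calls are no-ops until the next reset.
     */
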