#include "zink_batch.h"
#include "zink_context.h"
#include "zink_descriptors.h"
#include "zink_framebuffer.h"
#include "zink_kopper.h"
#include "zink_program.h"
#include "zink_query.h"
#include "zink_resource.h"
#include "zink_screen.h"
#include "zink_surface.h"

#ifdef VK_USE_PLATFORM_METAL_EXT
#include "QuartzCore/CAMetalLayer.h"
#endif

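/* threshold above which view objects on a busy resource get scheduled for deferred pruning */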
#define MAX_VIEW_COUNT 500

void
debug_describe_zink_batch_state(char *buf, const struct zink_batch_state *ptr)
{
   sprintf(buf, "zink_batch_state");
}

/* this resets the batch usage and tracking for a resource object */
static void
reset_obj(struct zink_screen *screen, struct zink_batch_state *bs, struct zink_resource_object *obj)
{
   /* if no batch usage exists after removing the usage from 'bs', this resource is considered fully idle */
   if (!zink_resource_object_usage_unset(obj, bs)) {
      /* the resource is idle, so reset all access/reordering info */
      obj->unordered_read = true;
      obj->unordered_write = true;
      obj->access = 0;
      obj->unordered_access = 0;
      obj->last_write = 0;
      obj->access_stage = 0;
      obj->unordered_access_stage = 0;
      obj->copies_need_reset = true;
      obj->unsync_access = true;
      /* also prune dead view objects */
      simple_mtx_lock(&obj->view_lock);
      if (obj->is_buffer) {
         while (util_dynarray_contains(&obj->views, VkBufferView))
            VKSCR(DestroyBufferView)(screen->dev, util_dynarray_pop(&obj->views, VkBufferView), NULL);
      } else {
         while (util_dynarray_contains(&obj->views, VkImageView))
            VKSCR(DestroyImageView)(screen->dev, util_dynarray_pop(&obj->views, VkImageView), NULL);
      }
      obj->view_prune_count = 0;
      obj->view_prune_timeline = 0;
      simple_mtx_unlock(&obj->view_lock);
      if (obj->dt)
         zink_kopper_prune_batch_usage(obj->dt, &bs->usage);
   } else if (util_dynarray_num_elements(&obj->views, VkBufferView) > MAX_VIEW_COUNT && !zink_bo_has_unflushed_usage(obj->bo)) {
      /* avoid ballooning from too many views on always-used resources: */
      simple_mtx_lock(&obj->view_lock);
      /* ensure no existing view pruning is queued, double check elements in case pruning just finished */
      if (!obj->view_prune_timeline && util_dynarray_num_elements(&obj->views, VkBufferView) > MAX_VIEW_COUNT) {
         /* prune all existing views */
         obj->view_prune_count = util_dynarray_num_elements(&obj->views, VkBufferView);
         /* prune them when the views will definitely not be in use */
         obj->view_prune_timeline = MAX2(obj->bo->reads.u ? obj->bo->reads.u->usage : 0,
                                         obj->bo->writes.u ? obj->bo->writes.u->usage : 0);
      }
      simple_mtx_unlock(&obj->view_lock);
   }
   /* resource objects are not unrefed here;
    * this is typically the last ref on a resource object, and destruction will
    * usually trigger an ioctl, so defer deletion to the submit thread to avoid blocking
    */
   util_dynarray_append(&bs->unref_resources, struct zink_resource_object*, obj);
}

/* reset all the resource objects in a given batch object list */
static void
reset_obj_list(struct zink_screen *screen, struct zink_batch_state *bs, struct zink_batch_obj_list *list)
{
   for (unsigned i = 0; i < list->num_buffers; i++)
      reset_obj(screen, bs, list->objs[i]);
   list->num_buffers = 0;
}

/* reset a given batch state */
void
zink_reset_batch_state(struct zink_context *ctx, struct zink_batch_state *bs)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);

   VkResult result = VKSCR(ResetCommandPool)(screen->dev, bs->cmdpool, 0);
   if (result != VK_SUCCESS)
      mesa_loge("ZINK: vkResetCommandPool failed (%s)", vk_Result_to_str(result));
   result = VKSCR(ResetCommandPool)(screen->dev, bs->unsynchronized_cmdpool, 0);
   if (result != VK_SUCCESS)
      mesa_loge("ZINK: vkResetCommandPool failed (%s)", vk_Result_to_str(result));

   /* unref/reset all used resources */
   reset_obj_list(screen, bs, &bs->real_objs);
   reset_obj_list(screen, bs, &bs->slab_objs);
   reset_obj_list(screen, bs, &bs->sparse_objs);
   while (util_dynarray_contains(&bs->swapchain_obj, struct zink_resource_object*)) {
      struct zink_resource_object *obj = util_dynarray_pop(&bs->swapchain_obj, struct zink_resource_object*);
      reset_obj(screen, bs, obj);
   }

   /* this is where bindless texture/buffer ids get recycled */
   for (unsigned i = 0; i < 2; i++) {
      while (util_dynarray_contains(&bs->bindless_releases[i], uint32_t)) {
         uint32_t handle = util_dynarray_pop(&bs->bindless_releases[i], uint32_t);
         bool is_buffer = ZINK_BINDLESS_IS_BUFFER(handle);
         struct util_idalloc *ids = i ? &ctx->di.bindless[is_buffer].img_slots : &ctx->di.bindless[is_buffer].tex_slots;
         util_idalloc_free(ids, is_buffer ? handle - ZINK_MAX_BINDLESS_HANDLES : handle);
      }
   }

   /* queries must only be destroyed once they are inactive */
   set_foreach_remove(&bs->active_queries, entry) {
      struct zink_query *query = (void*)entry->key;
      zink_prune_query(bs, query);
   }
   util_dynarray_foreach(&bs->dead_querypools, VkQueryPool, pool)
      VKSCR(DestroyQueryPool)(screen->dev, *pool, NULL);
   util_dynarray_clear(&bs->dead_querypools);

   util_dynarray_foreach(&bs->dgc.pipelines, VkPipeline, pipeline)
      VKSCR(DestroyPipeline)(screen->dev, *pipeline, NULL);
   util_dynarray_clear(&bs->dgc.pipelines);
   util_dynarray_foreach(&bs->dgc.layouts, VkIndirectCommandsLayoutNV, iclayout)
      VKSCR(DestroyIndirectCommandsLayoutNV)(screen->dev, *iclayout, NULL);
   util_dynarray_clear(&bs->dgc.layouts);

   /* samplers are appended to the batch state in which they are destroyed
    * to ensure deferred deletion without destroying in-use objects
    */
   util_dynarray_foreach(&bs->zombie_samplers, VkSampler, samp) {
      VKSCR(DestroySampler)(screen->dev, *samp, NULL);
   }
   util_dynarray_clear(&bs->zombie_samplers);

   zink_batch_descriptor_reset(screen, bs);

   util_dynarray_foreach(&bs->freed_sparse_backing_bos, struct zink_bo, bo) {
      zink_bo_unref(screen, bo);
   }
   util_dynarray_clear(&bs->freed_sparse_backing_bos);

   /* programs are refcounted and batch-tracked */
   set_foreach_remove(&bs->programs, entry) {
      struct zink_program *pg = (struct zink_program*)entry->key;
      zink_batch_usage_unset(&pg->batch_uses, bs);
      zink_program_reference(screen, &pg, NULL);
   }

   bs->resource_size = 0;
   bs->signal_semaphore = VK_NULL_HANDLE;
   util_dynarray_clear(&bs->wait_semaphore_stages);

   bs->present = VK_NULL_HANDLE;
   /* check the arrays first to avoid locking unnecessarily */
   if (util_dynarray_contains(&bs->acquires, VkSemaphore) || util_dynarray_contains(&bs->wait_semaphores, VkSemaphore)) {
      simple_mtx_lock(&screen->semaphores_lock);
      util_dynarray_append_dynarray(&screen->semaphores, &bs->acquires);
      util_dynarray_clear(&bs->acquires);
      util_dynarray_append_dynarray(&screen->semaphores, &bs->wait_semaphores);
      util_dynarray_clear(&bs->wait_semaphores);
      simple_mtx_unlock(&screen->semaphores_lock);
   }
   if (util_dynarray_contains(&bs->signal_semaphores, VkSemaphore) || util_dynarray_contains(&bs->fd_wait_semaphores, VkSemaphore)) {
      simple_mtx_lock(&screen->semaphores_lock);
      util_dynarray_append_dynarray(&screen->fd_semaphores, &bs->signal_semaphores);
      util_dynarray_clear(&bs->signal_semaphores);
      util_dynarray_append_dynarray(&screen->fd_semaphores, &bs->fd_wait_semaphores);
      util_dynarray_clear(&bs->fd_wait_semaphores);
      simple_mtx_unlock(&screen->semaphores_lock);
   }
   bs->swapchain = NULL;

   util_dynarray_foreach(&bs->fences, struct zink_tc_fence*, mfence)
      zink_fence_reference(screen, mfence, NULL);
   util_dynarray_clear(&bs->fences);

   bs->unordered_write_access = VK_ACCESS_NONE;
   bs->unordered_write_stages = VK_PIPELINE_STAGE_NONE;

   /* only increment batch generation if previously in-use to avoid false detection of batch completion */
   if (bs->fence.submitted)
      bs->usage.submit_count++;
   /* only reset submitted here so that tc fence desync can pick up the 'completed' flag
    * before the state is reused
    */
   bs->fence.submitted = false;
   bs->has_barriers = false;
   bs->has_unsync = false;
   if (bs->fence.batch_id)
      zink_screen_update_last_finished(screen, bs->fence.batch_id);
   bs->fence.batch_id = 0;
   bs->usage.usage = 0;
   bs->next = NULL;
   bs->last_added_obj = NULL;
}

/* this is where deferred resource unrefs occur */
static void
unref_resources(struct zink_screen *screen, struct zink_batch_state *bs)
{
   while (util_dynarray_contains(&bs->unref_resources, struct zink_resource_object*)) {
      struct zink_resource_object *obj = util_dynarray_pop(&bs->unref_resources, struct zink_resource_object*);
      /* view pruning may be deferred to avoid ballooning */
      if (obj->view_prune_timeline && zink_screen_check_last_finished(screen, obj->view_prune_timeline)) {
         simple_mtx_lock(&obj->view_lock);
         /* check again under lock in case multi-context use is in the same place */
         if (obj->view_prune_timeline && zink_screen_check_last_finished(screen, obj->view_prune_timeline)) {
            /* prune `view_prune_count` views */
            if (obj->is_buffer) {
               VkBufferView *views = obj->views.data;
               for (unsigned i = 0; i < obj->view_prune_count; i++)
                  VKSCR(DestroyBufferView)(screen->dev, views[i], NULL);
            } else {
               VkImageView *views = obj->views.data;
               for (unsigned i = 0; i < obj->view_prune_count; i++)
                  VKSCR(DestroyImageView)(screen->dev, views[i], NULL);
            }
            size_t offset = obj->view_prune_count * sizeof(VkBufferView);
            uint8_t *data = obj->views.data;
            /* shift the view array to the start */
            memcpy(data, data + offset, obj->views.size - offset);
            /* adjust the array size */
            obj->views.size -= offset;
            obj->view_prune_count = 0;
            obj->view_prune_timeline = 0;
         }
         simple_mtx_unlock(&obj->view_lock);
      }
      /* this is typically where resource objects get destroyed */
      zink_resource_object_reference(screen, &obj, NULL);
   }
}

/* utility for resetting a batch state; called on context destruction */
void
zink_clear_batch_state(struct zink_context *ctx, struct zink_batch_state *bs)
{
   bs->fence.completed = true;
   zink_reset_batch_state(ctx, bs);
   unref_resources(zink_screen(ctx->base.screen), bs);
}

/* utility for managing the singly-linked batch state list */
static void
pop_batch_state(struct zink_context *ctx)
{
   const struct zink_batch_state *bs = ctx->batch_states;
   ctx->batch_states = bs->next;
   ctx->batch_states_count--;
   if (ctx->last_fence == &bs->fence)
      ctx->last_fence = NULL;
}

/* reset all batch states and append to the free state list
 * only usable after a full stall
 */
void
zink_batch_reset_all(struct zink_context *ctx)
{
   while (ctx->batch_states) {
      struct zink_batch_state *bs = ctx->batch_states;
      bs->fence.completed = true;
      pop_batch_state(ctx);
      zink_reset_batch_state(ctx, bs);
      if (ctx->last_free_batch_state)
         ctx->last_free_batch_state->next = bs;
      else
         ctx->free_batch_states = bs;
      ctx->last_free_batch_state = bs;
   }
}

/* called only on context destruction */
void
zink_batch_state_destroy(struct zink_screen *screen, struct zink_batch_state *bs)
{
   if (!bs)
      return;

   util_queue_fence_destroy(&bs->flush_completed);

   cnd_destroy(&bs->usage.flush);
   mtx_destroy(&bs->usage.mtx);

   if (bs->cmdbuf)
      VKSCR(FreeCommandBuffers)(screen->dev, bs->cmdpool, 1, &bs->cmdbuf);
   if (bs->reordered_cmdbuf)
      VKSCR(FreeCommandBuffers)(screen->dev, bs->cmdpool, 1, &bs->reordered_cmdbuf);
   if (bs->cmdpool)
      VKSCR(DestroyCommandPool)(screen->dev, bs->cmdpool, NULL);
   if (bs->unsynchronized_cmdbuf)
      VKSCR(FreeCommandBuffers)(screen->dev, bs->unsynchronized_cmdpool, 1, &bs->unsynchronized_cmdbuf);
   if (bs->unsynchronized_cmdpool)
      VKSCR(DestroyCommandPool)(screen->dev, bs->unsynchronized_cmdpool, NULL);
   free(bs->real_objs.objs);
   free(bs->slab_objs.objs);
   free(bs->sparse_objs.objs);
   util_dynarray_fini(&bs->freed_sparse_backing_bos);
   util_dynarray_fini(&bs->dead_querypools);
   util_dynarray_fini(&bs->dgc.pipelines);
   util_dynarray_fini(&bs->dgc.layouts);
   util_dynarray_fini(&bs->swapchain_obj);
   util_dynarray_fini(&bs->zombie_samplers);
   util_dynarray_fini(&bs->unref_resources);
   util_dynarray_fini(&bs->bindless_releases[0]);
   util_dynarray_fini(&bs->bindless_releases[1]);
   util_dynarray_fini(&bs->acquires);
   util_dynarray_fini(&bs->acquire_flags);
   unsigned num_mfences = util_dynarray_num_elements(&bs->fence.mfences, void *);
   struct zink_tc_fence **mfence = bs->fence.mfences.data;
   for (unsigned i = 0; i < num_mfences; i++) {
      mfence[i]->fence = NULL;
   }
   util_dynarray_fini(&bs->fence.mfences);
   zink_batch_descriptor_deinit(screen, bs);
   ralloc_free(bs);
}

/* batch states are created:
 * - on context creation
 * - dynamically up to a threshold if no free ones are available
 */
static struct zink_batch_state *
create_batch_state(struct zink_context *ctx)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   struct zink_batch_state *bs = rzalloc(NULL, struct zink_batch_state);
   VkCommandPoolCreateInfo cpci = {0};
   cpci.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
   cpci.queueFamilyIndex = screen->gfx_queue;
   VkResult result;

   VRAM_ALLOC_LOOP(result,
      VKSCR(CreateCommandPool)(screen->dev, &cpci, NULL, &bs->cmdpool),
      if (result != VK_SUCCESS) {
         mesa_loge("ZINK: vkCreateCommandPool failed (%s)", vk_Result_to_str(result));
         goto fail;
      }
   );
   VRAM_ALLOC_LOOP(result,
      VKSCR(CreateCommandPool)(screen->dev, &cpci, NULL, &bs->unsynchronized_cmdpool),
      if (result != VK_SUCCESS) {
         mesa_loge("ZINK: vkCreateCommandPool failed (%s)", vk_Result_to_str(result));
         goto fail;
      }
   );

   VkCommandBuffer cmdbufs[2];
   VkCommandBufferAllocateInfo cbai = {0};
   cbai.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
   cbai.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
   cbai.commandPool = bs->cmdpool;
   cbai.commandBufferCount = 2;

   VRAM_ALLOC_LOOP(result,
      VKSCR(AllocateCommandBuffers)(screen->dev, &cbai, cmdbufs),
      if (result != VK_SUCCESS) {
         mesa_loge("ZINK: vkAllocateCommandBuffers failed (%s)", vk_Result_to_str(result));
         goto fail;
      }
   );

   bs->cmdbuf = cmdbufs[0];
   bs->reordered_cmdbuf = cmdbufs[1];

   cbai.commandPool = bs->unsynchronized_cmdpool;
   cbai.commandBufferCount = 1;
   VRAM_ALLOC_LOOP(result,
      VKSCR(AllocateCommandBuffers)(screen->dev, &cbai, &bs->unsynchronized_cmdbuf),
      if (result != VK_SUCCESS) {
         mesa_loge("ZINK: vkAllocateCommandBuffers failed (%s)", vk_Result_to_str(result));
         goto fail;
      }
   );

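/* initialize a pointer set with the batch state as its ralloc parent, bailing out of batch state creation on failure */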
#define SET_CREATE_OR_FAIL(ptr) \
   if (!_mesa_set_init(ptr, bs, _mesa_hash_pointer, _mesa_key_pointer_equal)) \
      goto fail

   bs->ctx = ctx;

   SET_CREATE_OR_FAIL(&bs->programs);
   SET_CREATE_OR_FAIL(&bs->active_queries);
   SET_CREATE_OR_FAIL(&bs->dmabuf_exports);
   util_dynarray_init(&bs->signal_semaphores, NULL);
   util_dynarray_init(&bs->wait_semaphores, NULL);
   util_dynarray_init(&bs->fd_wait_semaphores, NULL);
   util_dynarray_init(&bs->fences, NULL);
   util_dynarray_init(&bs->dead_querypools, NULL);
   util_dynarray_init(&bs->dgc.pipelines, NULL);
   util_dynarray_init(&bs->dgc.layouts, NULL);
   util_dynarray_init(&bs->wait_semaphore_stages, NULL);
   util_dynarray_init(&bs->fd_wait_semaphore_stages, NULL);
   util_dynarray_init(&bs->zombie_samplers, NULL);
   util_dynarray_init(&bs->freed_sparse_backing_bos, NULL);
   util_dynarray_init(&bs->unref_resources, NULL);
   util_dynarray_init(&bs->acquires, NULL);
   util_dynarray_init(&bs->acquire_flags, NULL);
   util_dynarray_init(&bs->bindless_releases[0], NULL);
   util_dynarray_init(&bs->bindless_releases[1], NULL);
   util_dynarray_init(&bs->swapchain_obj, NULL);
   util_dynarray_init(&bs->fence.mfences, NULL);

   cnd_init(&bs->usage.flush);
   mtx_init(&bs->usage.mtx, mtx_plain);
   simple_mtx_init(&bs->exportable_lock, mtx_plain);
   memset(&bs->buffer_indices_hashlist, -1, sizeof(bs->buffer_indices_hashlist));

   if (!zink_batch_descriptor_init(screen, bs))
      goto fail;

   util_queue_fence_init(&bs->flush_completed);

   return bs;
fail:
   zink_batch_state_destroy(screen, bs);
   return NULL;
}

/* a batch state is considered "free" if it is both submitted and completed */
static inline bool
find_unused_state(struct zink_batch_state *bs)
{
   struct zink_fence *fence = &bs->fence;
   /* we can't reset these from fence_finish because threads */
   bool completed = p_atomic_read(&fence->completed);
   bool submitted = p_atomic_read(&fence->submitted);
   return submitted && completed;
}

/* find a "free" batch state */
static struct zink_batch_state *
get_batch_state(struct zink_context *ctx, struct zink_batch *batch)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   struct zink_batch_state *bs = NULL;

   /* try from the ones that are known to be free first */
   if (ctx->free_batch_states) {
      bs = ctx->free_batch_states;
      ctx->free_batch_states = bs->next;
      if (bs == ctx->last_free_batch_state)
         ctx->last_free_batch_state = NULL;
   }
   /* try from the ones that are given back to the screen next */
   if (!bs) {
      simple_mtx_lock(&screen->free_batch_states_lock);
      if (screen->free_batch_states) {
         bs = screen->free_batch_states;
         bs->ctx = ctx;
         screen->free_batch_states = bs->next;
         if (bs == screen->last_free_batch_state)
            screen->last_free_batch_state = NULL;
      }
      simple_mtx_unlock(&screen->free_batch_states_lock);
   }
   /* states are stored sequentially, so if the first one doesn't work, none of them will */
   if (!bs && ctx->batch_states && ctx->batch_states->next) {
      /* only a submitted state can be reused */
      if (p_atomic_read(&ctx->batch_states->fence.submitted) &&
          /* a submitted state must have completed before it can be reused */
          (zink_screen_check_last_finished(screen, ctx->batch_states->fence.batch_id) ||
           p_atomic_read(&ctx->batch_states->fence.completed))) {
         bs = ctx->batch_states;
         pop_batch_state(ctx);
      }
   }
   if (bs) {
      zink_reset_batch_state(ctx, bs);
   } else {
      if (!batch->state) {
         /* this is batch init, so create a few more states for later use */
         for (int i = 0; i < 3; i++) {
            struct zink_batch_state *state = create_batch_state(ctx);
            if (ctx->last_free_batch_state)
               ctx->last_free_batch_state->next = state;
            else
               ctx->free_batch_states = state;
            ctx->last_free_batch_state = state;
         }
      }
      /* no batch states were available: make a new one */
      bs = create_batch_state(ctx);
   }
   return bs;
}

/* reset the batch object: get a new state and unset 'has_work' to disable flushing */
void
zink_reset_batch(struct zink_context *ctx, struct zink_batch *batch)
{
   batch->state = get_batch_state(ctx, batch);
   assert(batch->state);

   batch->has_work = false;
}

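/* bind the batch state's descriptor buffer (plus the bindless descriptor buffer when initialized)
 * on both the main and reordered cmdbufs
 */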
void
zink_batch_bind_db(struct zink_context *ctx)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   struct zink_batch *batch = &ctx->batch;
   unsigned count = 1;
   VkDescriptorBufferBindingInfoEXT infos[2] = {0};
   infos[0].sType = VK_STRUCTURE_TYPE_DESCRIPTOR_BUFFER_BINDING_INFO_EXT;
   infos[0].address = batch->state->dd.db->obj->bda;
   infos[0].usage = batch->state->dd.db->obj->vkusage;
   assert(infos[0].usage);

   if (ctx->dd.bindless_init) {
      infos[1].sType = VK_STRUCTURE_TYPE_DESCRIPTOR_BUFFER_BINDING_INFO_EXT;
      infos[1].address = ctx->dd.db.bindless_db->obj->bda;
      infos[1].usage = ctx->dd.db.bindless_db->obj->vkusage;
      assert(infos[1].usage);
      count++;
   }
   VKSCR(CmdBindDescriptorBuffersEXT)(batch->state->cmdbuf, count, infos);
   VKSCR(CmdBindDescriptorBuffersEXT)(batch->state->reordered_cmdbuf, count, infos);
   batch->state->dd.db_bound = true;
}

/* called on context creation and after flushing an old batch */
void
zink_start_batch(struct zink_context *ctx, struct zink_batch *batch)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   zink_reset_batch(ctx, batch);

   batch->state->usage.unflushed = true;

   VkCommandBufferBeginInfo cbbi = {0};
   cbbi.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
   cbbi.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;

   VkResult result;
   VRAM_ALLOC_LOOP(result,
      VKCTX(BeginCommandBuffer)(batch->state->cmdbuf, &cbbi),
      if (result != VK_SUCCESS)
         mesa_loge("ZINK: vkBeginCommandBuffer failed (%s)", vk_Result_to_str(result));
   );
   VRAM_ALLOC_LOOP(result,
      VKCTX(BeginCommandBuffer)(batch->state->reordered_cmdbuf, &cbbi),
      if (result != VK_SUCCESS)
         mesa_loge("ZINK: vkBeginCommandBuffer failed (%s)", vk_Result_to_str(result));
   );
   VRAM_ALLOC_LOOP(result,
      VKCTX(BeginCommandBuffer)(batch->state->unsynchronized_cmdbuf, &cbbi),
      if (result != VK_SUCCESS)
         mesa_loge("ZINK: vkBeginCommandBuffer failed (%s)", vk_Result_to_str(result));
   );

   batch->state->fence.completed = false;
   if (ctx->last_fence) {
      struct zink_batch_state *last_state = zink_batch_state(ctx->last_fence);
      batch->last_batch_usage = &last_state->usage;
   }

#ifdef HAVE_RENDERDOC_APP_H
   if (VKCTX(CmdInsertDebugUtilsLabelEXT) && screen->renderdoc_api) {
      VkDebugUtilsLabelEXT capture_label;
      /* Magic fallback which lets us bridge the Wine barrier over to Linux RenderDoc. */
      capture_label.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT;
      capture_label.pNext = NULL;
      capture_label.pLabelName = "vr-marker,frame_end,type,application";
      memset(capture_label.color, 0, sizeof(capture_label.color));
      VKCTX(CmdInsertDebugUtilsLabelEXT)(batch->state->unsynchronized_cmdbuf, &capture_label);
      VKCTX(CmdInsertDebugUtilsLabelEXT)(batch->state->reordered_cmdbuf, &capture_label);
      VKCTX(CmdInsertDebugUtilsLabelEXT)(batch->state->cmdbuf, &capture_label);
   }

   unsigned renderdoc_frame = p_atomic_read(&screen->renderdoc_frame);
   if (!(ctx->flags & ZINK_CONTEXT_COPY_ONLY) && screen->renderdoc_api && !screen->renderdoc_capturing &&
       ((screen->renderdoc_capture_all && screen->screen_id == 1) || (renderdoc_frame >= screen->renderdoc_capture_start && renderdoc_frame <= screen->renderdoc_capture_end))) {
      screen->renderdoc_api->StartFrameCapture(RENDERDOC_DEVICEPOINTER_FROM_VKINSTANCE(screen->instance), NULL);
      screen->renderdoc_capturing = true;
   }
#endif

   /* descriptor buffers must always be bound at the start of a batch */
   if (zink_descriptor_mode == ZINK_DESCRIPTOR_MODE_DB && !(ctx->flags & ZINK_CONTEXT_COPY_ONLY))
      zink_batch_bind_db(ctx);
   /* zero init for unordered blits */
   if (screen->info.have_EXT_attachment_feedback_loop_dynamic_state) {
      VKCTX(CmdSetAttachmentFeedbackLoopEnableEXT)(ctx->batch.state->cmdbuf, 0);
      VKCTX(CmdSetAttachmentFeedbackLoopEnableEXT)(ctx->batch.state->reordered_cmdbuf, 0);
      VKCTX(CmdSetAttachmentFeedbackLoopEnableEXT)(ctx->batch.state->unsynchronized_cmdbuf, 0);
   }
}

/* common operations to run post submit; split out for clarity */
static void
post_submit(void *data, void *gdata, int thread_index)
{
   struct zink_batch_state *bs = data;
   struct zink_screen *screen = zink_screen(bs->ctx->base.screen);

   if (bs->is_device_lost) {
      if (bs->ctx->reset.reset)
         bs->ctx->reset.reset(bs->ctx->reset.data, PIPE_GUILTY_CONTEXT_RESET);
      else if (screen->abort_on_hang && !screen->robust_ctx_count)
         /* if nothing can save us, abort */
         abort();
      screen->device_lost = true;
   } else if (bs->ctx->batch_states_count > 5000) {
      /* throttle in case something crazy is happening */
      zink_screen_timeline_wait(screen, bs->fence.batch_id - 2500, OS_TIMEOUT_INFINITE);
   }
   /* this resets the buffer hashlist for the state's next use */
   memset(&bs->buffer_indices_hashlist, -1, sizeof(bs->buffer_indices_hashlist));
}

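/* ordering of the submit infos passed to vkQueueSubmit:
 * wsi acquire waits, external fd waits, the cmdbuf submission, then the timeline signal
 */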
typedef enum {
   ZINK_SUBMIT_WAIT_ACQUIRE,
   ZINK_SUBMIT_WAIT_FD,
   ZINK_SUBMIT_CMDBUF,
   ZINK_SUBMIT_SIGNAL,
   ZINK_SUBMIT_MAX
} zink_submit;

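/* runs on the submit thread (or inline without threaded submit):
 * ends the cmdbufs, assembles the submit infos, and submits to the queue
 */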
static void
submit_queue(void *data, void *gdata, int thread_index)
{
   struct zink_batch_state *bs = data;
   struct zink_context *ctx = bs->ctx;
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   VkSubmitInfo si[ZINK_SUBMIT_MAX] = {0};
   VkSubmitInfo *submit = si;
   int num_si = ZINK_SUBMIT_MAX;
   while (!bs->fence.batch_id)
      bs->fence.batch_id = (uint32_t)p_atomic_inc_return(&screen->curr_batch);
   bs->usage.usage = bs->fence.batch_id;
   bs->usage.unflushed = false;

   uint64_t batch_id = bs->fence.batch_id;
   /* first submit is just for acquire waits since they have a separate array */
   for (unsigned i = 0; i < ARRAY_SIZE(si); i++)
      si[i].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
   si[ZINK_SUBMIT_WAIT_ACQUIRE].waitSemaphoreCount = util_dynarray_num_elements(&bs->acquires, VkSemaphore);
   si[ZINK_SUBMIT_WAIT_ACQUIRE].pWaitSemaphores = bs->acquires.data;
   while (util_dynarray_num_elements(&bs->acquire_flags, VkPipelineStageFlags) < si[ZINK_SUBMIT_WAIT_ACQUIRE].waitSemaphoreCount) {
      VkPipelineStageFlags mask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
      util_dynarray_append(&bs->acquire_flags, VkPipelineStageFlags, mask);
   }
   assert(util_dynarray_num_elements(&bs->acquires, VkSemaphore) <= util_dynarray_num_elements(&bs->acquire_flags, VkPipelineStageFlags));
   si[ZINK_SUBMIT_WAIT_ACQUIRE].pWaitDstStageMask = bs->acquire_flags.data;

   si[ZINK_SUBMIT_WAIT_FD].waitSemaphoreCount = util_dynarray_num_elements(&bs->fd_wait_semaphores, VkSemaphore);
   si[ZINK_SUBMIT_WAIT_FD].pWaitSemaphores = bs->fd_wait_semaphores.data;
   while (util_dynarray_num_elements(&bs->fd_wait_semaphore_stages, VkPipelineStageFlags) < si[ZINK_SUBMIT_WAIT_FD].waitSemaphoreCount) {
      VkPipelineStageFlags mask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
      util_dynarray_append(&bs->fd_wait_semaphore_stages, VkPipelineStageFlags, mask);
   }
   assert(util_dynarray_num_elements(&bs->fd_wait_semaphores, VkSemaphore) <= util_dynarray_num_elements(&bs->fd_wait_semaphore_stages, VkPipelineStageFlags));
   si[ZINK_SUBMIT_WAIT_FD].pWaitDstStageMask = bs->fd_wait_semaphore_stages.data;

   if (si[ZINK_SUBMIT_WAIT_ACQUIRE].waitSemaphoreCount == 0) {
      num_si--;
      submit++;
      if (si[ZINK_SUBMIT_WAIT_FD].waitSemaphoreCount == 0) {
         num_si--;
         submit++;
      }
   }

   /* then the real submit */
   si[ZINK_SUBMIT_CMDBUF].waitSemaphoreCount = util_dynarray_num_elements(&bs->wait_semaphores, VkSemaphore);
   si[ZINK_SUBMIT_CMDBUF].pWaitSemaphores = bs->wait_semaphores.data;
   si[ZINK_SUBMIT_CMDBUF].pWaitDstStageMask = bs->wait_semaphore_stages.data;
   VkCommandBuffer cmdbufs[3];
   unsigned c = 0;
   if (bs->has_unsync)
      cmdbufs[c++] = bs->unsynchronized_cmdbuf;
   if (bs->has_barriers)
      cmdbufs[c++] = bs->reordered_cmdbuf;
   cmdbufs[c++] = bs->cmdbuf;
   si[ZINK_SUBMIT_CMDBUF].pCommandBuffers = cmdbufs;
   si[ZINK_SUBMIT_CMDBUF].commandBufferCount = c;
   /* assorted signal submit from wsi/externals */
   si[ZINK_SUBMIT_CMDBUF].signalSemaphoreCount = util_dynarray_num_elements(&bs->signal_semaphores, VkSemaphore);
   si[ZINK_SUBMIT_CMDBUF].pSignalSemaphores = bs->signal_semaphores.data;

   /* then the signal submit with the timeline (fence) semaphore */
   VkSemaphore signals[3];
   si[ZINK_SUBMIT_SIGNAL].signalSemaphoreCount = !!bs->signal_semaphore;
   signals[0] = bs->signal_semaphore;
   si[ZINK_SUBMIT_SIGNAL].pSignalSemaphores = signals;
   VkTimelineSemaphoreSubmitInfo tsi = {0};
   uint64_t signal_values[2] = {0};
   tsi.sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO;
   si[ZINK_SUBMIT_SIGNAL].pNext = &tsi;
   tsi.pSignalSemaphoreValues = signal_values;
   signal_values[si[ZINK_SUBMIT_SIGNAL].signalSemaphoreCount] = batch_id;
   signals[si[ZINK_SUBMIT_SIGNAL].signalSemaphoreCount++] = screen->sem;
   tsi.signalSemaphoreValueCount = si[ZINK_SUBMIT_SIGNAL].signalSemaphoreCount;

   if (bs->present)
      signals[si[ZINK_SUBMIT_SIGNAL].signalSemaphoreCount++] = bs->present;
   tsi.signalSemaphoreValueCount = si[ZINK_SUBMIT_SIGNAL].signalSemaphoreCount;


   VkResult result;
   VRAM_ALLOC_LOOP(result,
      VKSCR(EndCommandBuffer)(bs->cmdbuf),
      if (result != VK_SUCCESS) {
         mesa_loge("ZINK: vkEndCommandBuffer failed (%s)", vk_Result_to_str(result));
         bs->is_device_lost = true;
         goto end;
      }
   );
   if (bs->has_barriers) {
      if (bs->unordered_write_access) {
         VkMemoryBarrier mb;
         mb.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
         mb.pNext = NULL;
         mb.srcAccessMask = bs->unordered_write_access;
         mb.dstAccessMask = VK_ACCESS_NONE;
         VKSCR(CmdPipelineBarrier)(bs->reordered_cmdbuf,
                                   bs->unordered_write_stages,
                                   screen->info.have_KHR_synchronization2 ? VK_PIPELINE_STAGE_NONE : VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
                                   0, 1, &mb, 0, NULL, 0, NULL);
      }
      VRAM_ALLOC_LOOP(result,
         VKSCR(EndCommandBuffer)(bs->reordered_cmdbuf),
         if (result != VK_SUCCESS) {
            mesa_loge("ZINK: vkEndCommandBuffer failed (%s)", vk_Result_to_str(result));
            bs->is_device_lost = true;
            goto end;
         }
      );
   }
   if (bs->has_unsync) {
      VRAM_ALLOC_LOOP(result,
         VKSCR(EndCommandBuffer)(bs->unsynchronized_cmdbuf),
         if (result != VK_SUCCESS) {
            mesa_loge("ZINK: vkEndCommandBuffer failed (%s)", vk_Result_to_str(result));
            bs->is_device_lost = true;
            goto end;
         }
      );
   }

   if (!si[ZINK_SUBMIT_SIGNAL].signalSemaphoreCount)
      num_si--;

   simple_mtx_lock(&screen->queue_lock);
   VRAM_ALLOC_LOOP(result,
      VKSCR(QueueSubmit)(screen->queue, num_si, submit, VK_NULL_HANDLE),
      if (result != VK_SUCCESS) {
         mesa_loge("ZINK: vkQueueSubmit failed (%s)", vk_Result_to_str(result));
         bs->is_device_lost = true;
      }
   );
   simple_mtx_unlock(&screen->queue_lock);

   unsigned i = 0;
   VkSemaphore *sem = bs->signal_semaphores.data;
   set_foreach_remove(&bs->dmabuf_exports, entry) {
      struct zink_resource *res = (void*)entry->key;
      for (; res; res = zink_resource(res->base.b.next))
         zink_screen_import_dmabuf_semaphore(screen, res, sem[i++]);

      struct pipe_resource *pres = (void*)entry->key;
      pipe_resource_reference(&pres, NULL);
   }

   bs->usage.submit_count++;
end:
   cnd_broadcast(&bs->usage.flush);

   p_atomic_set(&bs->fence.submitted, true);
   unref_resources(screen, bs);
}

/* called during flush */
void
zink_end_batch(struct zink_context *ctx, struct zink_batch *batch)
{
   if (!ctx->queries_disabled)
      zink_suspend_queries(ctx, batch);


   struct zink_screen *screen = zink_screen(ctx->base.screen);
   if (ctx->tc && !ctx->track_renderpasses)
      tc_driver_internal_flush_notify(ctx->tc);
   struct zink_batch_state *bs;

   /* oom flushing is triggered to handle stupid piglit tests like streaming-texture-leak */
   if (ctx->oom_flush || ctx->batch_states_count > 25) {
      assert(!ctx->batch_states_count || ctx->batch_states);
      while (ctx->batch_states) {
         bs = ctx->batch_states;
         struct zink_fence *fence = &bs->fence;
         /* once an incomplete state is reached, no more will be complete */
         if (!zink_check_batch_completion(ctx, fence->batch_id))
            break;

         pop_batch_state(ctx);
         zink_reset_batch_state(ctx, bs);
         if (ctx->last_free_batch_state)
            ctx->last_free_batch_state->next = bs;
         else
            ctx->free_batch_states = bs;
         ctx->last_free_batch_state = bs;
      }
      if (ctx->batch_states_count > 50)
         ctx->oom_flush = true;
   }

   bs = batch->state;
   if (ctx->last_fence)
      zink_batch_state(ctx->last_fence)->next = bs;
   else {
      assert(!ctx->batch_states);
      ctx->batch_states = bs;
   }
   ctx->last_fence = &bs->fence;
   ctx->batch_states_count++;
   batch->work_count = 0;

   /* this is swapchain presentation semaphore handling */
   if (batch->swapchain) {
      if (zink_kopper_acquired(batch->swapchain->obj->dt, batch->swapchain->obj->dt_idx) && !batch->swapchain->obj->present) {
         batch->state->present = zink_kopper_present(screen, batch->swapchain);
         batch->state->swapchain = batch->swapchain;
      }
      batch->swapchain = NULL;
   }

   if (screen->device_lost)
      return;

   if (ctx->tc) {
      set_foreach(&bs->active_queries, entry)
         zink_query_sync(ctx, (void*)entry->key);
   }

   set_foreach(&bs->dmabuf_exports, entry) {
      struct zink_resource *res = (void*)entry->key;
      if (screen->info.have_KHR_synchronization2) {
         VkImageMemoryBarrier2 imb;
         zink_resource_image_barrier2_init(&imb, res, res->layout, 0, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
         imb.srcQueueFamilyIndex = screen->gfx_queue;
         imb.dstQueueFamilyIndex = VK_QUEUE_FAMILY_FOREIGN_EXT;
         VkDependencyInfo dep = {
            VK_STRUCTURE_TYPE_DEPENDENCY_INFO,
            NULL,
            0,
            0,
            NULL,
            0,
            NULL,
            1,
            &imb
         };
         VKCTX(CmdPipelineBarrier2)(bs->cmdbuf, &dep);
      } else {
         VkImageMemoryBarrier imb;
         zink_resource_image_barrier_init(&imb, res, res->layout, 0, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT);
         imb.srcQueueFamilyIndex = screen->gfx_queue;
         imb.dstQueueFamilyIndex = VK_QUEUE_FAMILY_FOREIGN_EXT;
         VKCTX(CmdPipelineBarrier)(
            bs->cmdbuf,
            res->obj->access_stage,
            VK_PIPELINE_STAGE_ALL_COMMANDS_BIT,
            0,
            0, NULL,
            0, NULL,
            1, &imb
         );
      }
      res->queue = VK_QUEUE_FAMILY_FOREIGN_EXT;

      for (; res; res = zink_resource(res->base.b.next)) {
         VkSemaphore sem = zink_create_exportable_semaphore(screen);
         if (sem)
            util_dynarray_append(&ctx->batch.state->signal_semaphores, VkSemaphore, sem);
      }
   }

   if (screen->threaded_submit) {
      util_queue_add_job(&screen->flush_queue, bs, &bs->flush_completed,
                         submit_queue, post_submit, 0);
   } else {
      submit_queue(bs, NULL, 0);
      post_submit(bs, NULL, 0);
   }
#ifdef HAVE_RENDERDOC_APP_H
   if (!(ctx->flags & ZINK_CONTEXT_COPY_ONLY) && screen->renderdoc_capturing && p_atomic_read(&screen->renderdoc_frame) > screen->renderdoc_capture_end) {
      screen->renderdoc_api->EndFrameCapture(RENDERDOC_DEVICEPOINTER_FROM_VKINSTANCE(screen->instance), NULL);
      screen->renderdoc_capturing = false;
   }
#endif
}

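/* look up a resource object in a batch object list via the bo's hashlist slot,
 * falling back to a linear scan; returns the index or -1 if not found
 */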
static int
batch_find_resource(struct zink_batch_state *bs, struct zink_resource_object *obj, struct zink_batch_obj_list *list)
{
   unsigned hash = obj->bo->unique_id & (BUFFER_HASHLIST_SIZE-1);
   int buffer_index = bs->buffer_indices_hashlist[hash];

   /* not found or found */
   if (buffer_index < 0 || (buffer_index < list->num_buffers && list->objs[buffer_index] == obj))
      return buffer_index;

   /* Hash collision, look for the BO in the list of list->objs linearly. */
   for (int i = list->num_buffers - 1; i >= 0; i--) {
      if (list->objs[i] == obj) {
         /* Put this buffer in the hash list.
          * This will prevent additional hash collisions if there are
          * several consecutive lookup_buffer calls for the same buffer.
          *
          * Example: Assuming list->objs A,B,C collide in the hash list,
          * the following sequence of list->objs:
          *         AAAAAAAAAAABBBBBBBBBBBBBBCCCCCCCC
          * will collide here: ^ and here:   ^,
          * meaning that we should get very few collisions in the end. */
         bs->buffer_indices_hashlist[hash] = i & (BUFFER_HASHLIST_SIZE-1);
         return i;
      }
   }
   return -1;
}

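/* add batch usage for a resource that is about to be read or written, taking a batch ref if needed */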
void
zink_batch_reference_resource_rw(struct zink_batch *batch, struct zink_resource *res, bool write)
{
   /* if the resource already has usage of any sort set for this batch, */
   if (!zink_resource_usage_matches(res, batch->state) ||
       /* or if it's bound somewhere */
       !zink_resource_has_binds(res))
      /* then it already has a batch ref and doesn't need one here */
      zink_batch_reference_resource(batch, res);
   zink_batch_resource_usage_set(batch, res, write, res->obj->is_buffer);
}

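/* acquire semaphores (e.g., swapchain acquires) are waited on by the first submit; see ZINK_SUBMIT_WAIT_ACQUIRE */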
void
zink_batch_add_wait_semaphore(struct zink_batch *batch, VkSemaphore sem)
{
   util_dynarray_append(&batch->state->acquires, VkSemaphore, sem);
}

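/* add a pointer to the given set; returns true if it was not already present */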
static bool
batch_ptr_add_usage(struct zink_batch *batch, struct set *s, void *ptr)
{
   bool found = false;
   _mesa_set_search_or_add(s, ptr, &found);
   return !found;
}

/* this is a vague, handwave-y estimate */
ALWAYS_INLINE static void
check_oom_flush(struct zink_context *ctx, const struct zink_batch *batch)
{
   const VkDeviceSize resource_size = batch->state->resource_size;
   if (resource_size >= zink_screen(ctx->base.screen)->clamp_video_mem) {
      ctx->oom_flush = true;
      ctx->oom_stall = true;
   }
}

/* this adds a ref (batch tracking) */
void
zink_batch_reference_resource(struct zink_batch *batch, struct zink_resource *res)
{
   if (!zink_batch_reference_resource_move(batch, res))
      zink_resource_object_reference(NULL, NULL, res->obj);
}

/* this adds batch usage */
bool
zink_batch_reference_resource_move(struct zink_batch *batch, struct zink_resource *res)
{
   struct zink_batch_state *bs = batch->state;

   simple_mtx_lock(&batch->ref_lock);
   /* swapchains are special */
   if (zink_is_swapchain(res)) {
      struct zink_resource_object **swapchains = bs->swapchain_obj.data;
      unsigned count = util_dynarray_num_elements(&bs->swapchain_obj, struct zink_resource_object*);
      for (unsigned i = 0; i < count; i++) {
         if (swapchains[i] == res->obj) {
            simple_mtx_unlock(&batch->ref_lock);
            return true;
         }
      }
      util_dynarray_append(&bs->swapchain_obj, struct zink_resource_object*, res->obj);
      simple_mtx_unlock(&batch->ref_lock);
      return false;
   }
   /* Fast exit for no-op calls.
    * This is very effective with suballocators and linear uploaders that
    * are outside of the winsys.
    */
   if (res->obj == bs->last_added_obj) {
      simple_mtx_unlock(&batch->ref_lock);
      return true;
   }

   struct zink_bo *bo = res->obj->bo;
   struct zink_batch_obj_list *list;
   if (!(res->base.b.flags & PIPE_RESOURCE_FLAG_SPARSE)) {
      if (!bo->mem) {
         list = &bs->slab_objs;
      } else {
         list = &bs->real_objs;
      }
   } else {
      list = &bs->sparse_objs;
   }
   int idx = batch_find_resource(bs, res->obj, list);
   if (idx >= 0) {
      simple_mtx_unlock(&batch->ref_lock);
      return true;
   }

   if (list->num_buffers >= list->max_buffers) {
      unsigned new_max = MAX2(list->max_buffers + 16, (unsigned)(list->max_buffers * 1.3));
      struct zink_resource_object **objs = realloc(list->objs, new_max * sizeof(void*));
      if (!objs) {
         /* things are about to go dramatically wrong anyway */
         mesa_loge("zink: buffer list realloc failed due to oom!\n");
         abort();
      }
      list->objs = objs;
      list->max_buffers = new_max;
   }
   idx = list->num_buffers++;
   list->objs[idx] = res->obj;
   unsigned hash = bo->unique_id & (BUFFER_HASHLIST_SIZE-1);
   bs->buffer_indices_hashlist[hash] = idx & 0x7fff;
   bs->last_added_obj = res->obj;
   if (!(res->base.b.flags & PIPE_RESOURCE_FLAG_SPARSE)) {
      bs->resource_size += res->obj->size;
   } else {
      /* Sparse backing pages are not directly referenced by the batch as
       * there can be a lot of them.
       * Instead, they are kept referenced in one of two ways:
       * - While they are committed, they are directly referenced from the
       *   resource's state.
       * - Upon de-commit, they are added to the freed_sparse_backing_bos
       *   list, which will defer destroying the resource until the batch
       *   performing unbind finishes.
       */
   }
   check_oom_flush(batch->state->ctx, batch);
   batch->has_work = true;
   simple_mtx_unlock(&batch->ref_lock);
   return false;
}

/* this is how programs achieve deferred deletion */
void
zink_batch_reference_program(struct zink_batch *batch,
                             struct zink_program *pg)
{
   if (zink_batch_usage_matches(pg->batch_uses, batch->state) ||
       !batch_ptr_add_usage(batch, &batch->state->programs, pg))
      return;
   pipe_reference(NULL, &pg->reference);
   zink_batch_usage_set(&pg->batch_uses, batch->state);
   batch->has_work = true;
}

/* a fast (hopefully) way to check whether a given batch has completed */
bool
zink_screen_usage_check_completion(struct zink_screen *screen, const struct zink_batch_usage *u)
{
   if (!zink_batch_usage_exists(u))
      return true;
   if (zink_batch_usage_is_unflushed(u))
      return false;

   return zink_screen_timeline_wait(screen, u->usage, 0);
}

/* an even faster check that doesn't ioctl */
bool
zink_screen_usage_check_completion_fast(struct zink_screen *screen, const struct zink_batch_usage *u)
{
   if (!zink_batch_usage_exists(u))
      return true;
   if (zink_batch_usage_is_unflushed(u))
      return false;

   return zink_screen_check_last_finished(screen, u->usage);
}

bool
zink_batch_usage_check_completion(struct zink_context *ctx, const struct zink_batch_usage *u)
{
   if (!zink_batch_usage_exists(u))
      return true;
   if (zink_batch_usage_is_unflushed(u))
      return false;
   return zink_check_batch_completion(ctx, u->usage);
}

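/* wait for the given usage to finish: unflushed usage from this context triggers a flush,
 * unflushed usage from another context waits on its flush cnd, then the timeline wait happens
 */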
static void
batch_usage_wait(struct zink_context *ctx, struct zink_batch_usage *u, bool trywait)
{
   if (!zink_batch_usage_exists(u))
      return;
   if (zink_batch_usage_is_unflushed(u)) {
      if (likely(u == &ctx->batch.state->usage))
         ctx->base.flush(&ctx->base, NULL, PIPE_FLUSH_HINT_FINISH);
      else { //multi-context
         mtx_lock(&u->mtx);
         if (trywait) {
            struct timespec ts = {0, 10000};
            cnd_timedwait(&u->flush, &u->mtx, &ts);
         } else
            cnd_wait(&u->flush, &u->mtx);
         mtx_unlock(&u->mtx);
      }
   }
   zink_wait_on_batch(ctx, u->usage);
}

void
zink_batch_usage_wait(struct zink_context *ctx, struct zink_batch_usage *u)
{
   batch_usage_wait(ctx, u, false);
}

void
zink_batch_usage_try_wait(struct zink_context *ctx, struct zink_batch_usage *u)
{
   batch_usage_wait(ctx, u, true);
}