• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 #include "zink_batch.h"
2 
3 #include "zink_context.h"
4 #include "zink_kopper.h"
5 #include "zink_fence.h"
6 #include "zink_framebuffer.h"
7 #include "zink_query.h"
8 #include "zink_program.h"
9 #include "zink_render_pass.h"
10 #include "zink_resource.h"
11 #include "zink_screen.h"
12 #include "zink_surface.h"
13 
14 #include "util/hash_table.h"
15 #include "util/u_debug.h"
16 #include "util/set.h"
17 
18 #ifdef VK_USE_PLATFORM_METAL_EXT
19 #include "QuartzCore/CAMetalLayer.h"
20 #endif
21 #include "wsi_common.h"
22 
/* u_debug_describe callback: emits a fixed type name into 'buf'
 * (the caller guarantees the buffer is large enough)
 */
void
debug_describe_zink_batch_state(char *buf, const struct zink_batch_state *ptr)
{
   strcpy(buf, "zink_batch_state");
}
28 
/* return a batch state to a reusable condition: reset its command pool,
 * drop every per-batch reference it holds, destroy per-batch Vulkan
 * objects, and clear its fence/usage bookkeeping
 */
void
zink_reset_batch_state(struct zink_context *ctx, struct zink_batch_state *bs)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);

   VkResult result = VKSCR(ResetCommandPool)(screen->dev, bs->cmdpool, 0);
   if (result != VK_SUCCESS)
      mesa_loge("ZINK: vkResetCommandPool failed (%s)", vk_Result_to_str(result));

   /* unref all used resources */
   set_foreach_remove(bs->resources, entry) {
      struct zink_resource_object *obj = (struct zink_resource_object *)entry->key;
      if (!zink_resource_object_usage_unset(obj, bs)) {
         /* no batch is using this object anymore: clear its access tracking */
         obj->unordered_read = obj->unordered_write = false;
         obj->access = 0;
         obj->access_stage = 0;
      }
      /* the actual unref is deferred to unref_resources() */
      util_dynarray_append(&bs->unref_resources, struct zink_resource_object*, obj);
   }

   /* return released bindless handles to their id allocators;
    * i==0 -> tex_slots, i==1 -> img_slots (buffer vs image is encoded in the handle)
    */
   for (unsigned i = 0; i < 2; i++) {
      while (util_dynarray_contains(&bs->bindless_releases[i], uint32_t)) {
         uint32_t handle = util_dynarray_pop(&bs->bindless_releases[i], uint32_t);
         bool is_buffer = ZINK_BINDLESS_IS_BUFFER(handle);
         struct util_idalloc *ids = i ? &ctx->di.bindless[is_buffer].img_slots : &ctx->di.bindless[is_buffer].tex_slots;
         util_idalloc_free(ids, is_buffer ? handle - ZINK_MAX_BINDLESS_HANDLES : handle);
      }
   }

   set_foreach_remove(bs->active_queries, entry) {
      struct zink_query *query = (void*)entry->key;
      zink_prune_query(screen, bs, query);
   }

   /* release per-batch surface/bufferview refs and their usage marks */
   set_foreach_remove(bs->surfaces, entry) {
      struct zink_surface *surf = (struct zink_surface *)entry->key;
      zink_batch_usage_unset(&surf->batch_uses, bs);
      zink_surface_reference(screen, &surf, NULL);
   }
   set_foreach_remove(bs->bufferviews, entry) {
      struct zink_buffer_view *buffer_view = (struct zink_buffer_view *)entry->key;
      zink_batch_usage_unset(&buffer_view->batch_uses, bs);
      zink_buffer_view_reference(screen, &buffer_view, NULL);
   }

   util_dynarray_foreach(&bs->dead_framebuffers, struct zink_framebuffer*, fb) {
      zink_framebuffer_reference(screen, fb, NULL);
   }
   util_dynarray_clear(&bs->dead_framebuffers);
   /* samplers queued for deletion while the batch was in flight die here */
   util_dynarray_foreach(&bs->zombie_samplers, VkSampler, samp) {
      VKSCR(DestroySampler)(screen->dev, *samp, NULL);
   }
   util_dynarray_clear(&bs->zombie_samplers);
   util_dynarray_clear(&bs->persistent_resources);

   screen->batch_descriptor_reset(screen, bs);

   set_foreach_remove(bs->programs, entry) {
      struct zink_program *pg = (struct zink_program*)entry->key;
      zink_batch_usage_unset(&pg->batch_uses, bs);
      zink_program_reference(ctx, &pg, NULL);
   }

   bs->resource_size = 0;
   bs->signal_semaphore = VK_NULL_HANDLE;
   /* wait/acquire semaphores are per-submit objects and can be destroyed now */
   while (util_dynarray_contains(&bs->wait_semaphores, VkSemaphore))
      VKSCR(DestroySemaphore)(screen->dev, util_dynarray_pop(&bs->wait_semaphores, VkSemaphore), NULL);
   util_dynarray_clear(&bs->wait_semaphore_stages);

   bs->present = VK_NULL_HANDLE;
   while (util_dynarray_contains(&bs->acquires, VkSemaphore))
      VKSCR(DestroySemaphore)(screen->dev, util_dynarray_pop(&bs->acquires, VkSemaphore), NULL);
   bs->swapchain = NULL;

   while (util_dynarray_contains(&bs->dead_swapchains, VkImageView))
      VKSCR(DestroyImageView)(screen->dev, util_dynarray_pop(&bs->dead_swapchains, VkImageView), NULL);

   /* only reset submitted here so that tc fence desync can pick up the 'completed' flag
    * before the state is reused
    */
   bs->fence.submitted = false;
   bs->has_barriers = false;
   if (bs->fence.batch_id)
      zink_screen_update_last_finished(screen, bs->fence.batch_id);
   bs->submit_count++;
   bs->fence.batch_id = 0;
   bs->usage.usage = 0;
   bs->next = NULL;
}
118 
119 static void
unref_resources(struct zink_screen * screen,struct zink_batch_state * bs)120 unref_resources(struct zink_screen *screen, struct zink_batch_state *bs)
121 {
122    while (util_dynarray_contains(&bs->unref_resources, struct zink_resource_object*)) {
123       struct zink_resource_object *obj = util_dynarray_pop(&bs->unref_resources, struct zink_resource_object*);
124       zink_resource_object_reference(screen, &obj, NULL);
125    }
126 }
127 
128 void
zink_clear_batch_state(struct zink_context * ctx,struct zink_batch_state * bs)129 zink_clear_batch_state(struct zink_context *ctx, struct zink_batch_state *bs)
130 {
131    bs->fence.completed = true;
132    zink_reset_batch_state(ctx, bs);
133    unref_resources(zink_screen(ctx->base.screen), bs);
134 }
135 
136 static void
pop_batch_state(struct zink_context * ctx)137 pop_batch_state(struct zink_context *ctx)
138 {
139    const struct zink_batch_state *bs = ctx->batch_states;
140    ctx->batch_states = bs->next;
141    ctx->batch_states_count--;
142    if (ctx->last_fence == &bs->fence)
143       ctx->last_fence = NULL;
144 }
145 
146 void
zink_batch_reset_all(struct zink_context * ctx)147 zink_batch_reset_all(struct zink_context *ctx)
148 {
149    while (ctx->batch_states) {
150       struct zink_batch_state *bs = ctx->batch_states;
151       bs->fence.completed = true;
152       pop_batch_state(ctx);
153       zink_reset_batch_state(ctx, bs);
154       util_dynarray_append(&ctx->free_batch_states, struct zink_batch_state *, bs);
155    }
156 }
157 
/* fully destroy a batch state and everything it owns;
 * safe to call with NULL or with a partially-constructed state
 * (used by the create_batch_state() failure path)
 */
void
zink_batch_state_destroy(struct zink_screen *screen, struct zink_batch_state *bs)
{
   if (!bs)
      return;

   util_queue_fence_destroy(&bs->flush_completed);

   cnd_destroy(&bs->usage.flush);
   mtx_destroy(&bs->usage.mtx);

   /* cmdbufs must be freed before their owning pool is destroyed */
   if (bs->cmdbuf)
      VKSCR(FreeCommandBuffers)(screen->dev, bs->cmdpool, 1, &bs->cmdbuf);
   if (bs->barrier_cmdbuf)
      VKSCR(FreeCommandBuffers)(screen->dev, bs->cmdpool, 1, &bs->barrier_cmdbuf);
   if (bs->cmdpool)
      VKSCR(DestroyCommandPool)(screen->dev, bs->cmdpool, NULL);

   util_dynarray_fini(&bs->zombie_samplers);
   util_dynarray_fini(&bs->dead_framebuffers);
   util_dynarray_fini(&bs->unref_resources);
   util_dynarray_fini(&bs->bindless_releases[0]);
   util_dynarray_fini(&bs->bindless_releases[1]);
   util_dynarray_fini(&bs->acquires);
   util_dynarray_fini(&bs->acquire_flags);
   util_dynarray_fini(&bs->dead_swapchains);
   _mesa_set_destroy(bs->surfaces, NULL);
   _mesa_set_destroy(bs->bufferviews, NULL);
   _mesa_set_destroy(bs->programs, NULL);
   _mesa_set_destroy(bs->active_queries, NULL);
   screen->batch_descriptor_deinit(screen, bs);
   /* bs is the ralloc context for its sets, so this frees the rest */
   ralloc_free(bs);
}
191 
/* allocate and initialize a fresh batch state: command pool, the two
 * cmdbufs (main + barrier), all tracking sets/arrays, the usage lock, and
 * the screen's per-batch descriptor data; on any failure the partially
 * built state is destroyed and NULL is returned
 */
static struct zink_batch_state *
create_batch_state(struct zink_context *ctx)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   struct zink_batch_state *bs = rzalloc(NULL, struct zink_batch_state);
   VkCommandPoolCreateInfo cpci = {0};
   cpci.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
   cpci.queueFamilyIndex = screen->gfx_queue;
   VkResult result = VKSCR(CreateCommandPool)(screen->dev, &cpci, NULL, &bs->cmdpool);
   if (result != VK_SUCCESS) {
      mesa_loge("ZINK: vkCreateCommandPool failed (%s)", vk_Result_to_str(result));
      goto fail;
   }

   VkCommandBufferAllocateInfo cbai = {0};
   cbai.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
   cbai.commandPool = bs->cmdpool;
   cbai.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
   cbai.commandBufferCount = 1;

   /* main recording cmdbuf */
   result = VKSCR(AllocateCommandBuffers)(screen->dev, &cbai, &bs->cmdbuf);
   if (result != VK_SUCCESS) {
      mesa_loge("ZINK: vkAllocateCommandBuffers failed (%s)", vk_Result_to_str(result));
      goto fail;
   }

   /* separate cmdbuf for barriers; submitted ahead of the main one when used */
   result = VKSCR(AllocateCommandBuffers)(screen->dev, &cbai, &bs->barrier_cmdbuf);
   if (result != VK_SUCCESS) {
      mesa_loge("ZINK: vkAllocateCommandBuffers failed (%s)", vk_Result_to_str(result));
      goto fail;
   }

/* sets are ralloc'd against bs so zink_batch_state_destroy() frees them */
#define SET_CREATE_OR_FAIL(ptr) \
   ptr = _mesa_pointer_set_create(bs); \
   if (!ptr) \
      goto fail

   bs->ctx = ctx;

   SET_CREATE_OR_FAIL(bs->resources);
   SET_CREATE_OR_FAIL(bs->surfaces);
   SET_CREATE_OR_FAIL(bs->bufferviews);
   SET_CREATE_OR_FAIL(bs->programs);
   SET_CREATE_OR_FAIL(bs->active_queries);
   util_dynarray_init(&bs->wait_semaphores, NULL);
   util_dynarray_init(&bs->wait_semaphore_stages, NULL);
   util_dynarray_init(&bs->zombie_samplers, NULL);
   util_dynarray_init(&bs->dead_framebuffers, NULL);
   util_dynarray_init(&bs->persistent_resources, NULL);
   util_dynarray_init(&bs->unref_resources, NULL);
   util_dynarray_init(&bs->acquires, NULL);
   util_dynarray_init(&bs->acquire_flags, NULL);
   util_dynarray_init(&bs->dead_swapchains, NULL);
   util_dynarray_init(&bs->bindless_releases[0], NULL);
   util_dynarray_init(&bs->bindless_releases[1], NULL);

   cnd_init(&bs->usage.flush);
   mtx_init(&bs->usage.mtx, mtx_plain);

   if (!screen->batch_descriptor_init(screen, bs))
      goto fail;

   util_queue_fence_init(&bs->flush_completed);

   return bs;
fail:
   /* handles a partially-initialized bs */
   zink_batch_state_destroy(screen, bs);
   return NULL;
}
261 
262 static inline bool
find_unused_state(struct zink_batch_state * bs)263 find_unused_state(struct zink_batch_state *bs)
264 {
265    struct zink_fence *fence = &bs->fence;
266    /* we can't reset these from fence_finish because threads */
267    bool completed = p_atomic_read(&fence->completed);
268    bool submitted = p_atomic_read(&fence->submitted);
269    return submitted && completed;
270 }
271 
/* fetch a batch state to record into: prefer the free list, then the
 * oldest in-flight state if it has finished, else allocate a new one
 */
static struct zink_batch_state *
get_batch_state(struct zink_context *ctx, struct zink_batch *batch)
{
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   struct zink_batch_state *bs = NULL;

   if (util_dynarray_num_elements(&ctx->free_batch_states, struct zink_batch_state*))
      bs = util_dynarray_pop(&ctx->free_batch_states, struct zink_batch_state*);
   if (!bs && ctx->batch_states) {
      /* states are stored sequentially, so if the first one doesn't work, none of them will */
      if (zink_screen_check_last_finished(screen, ctx->batch_states->fence.batch_id) ||
          find_unused_state(ctx->batch_states)) {
         bs = ctx->batch_states;
         pop_batch_state(ctx);
      }
   }
   if (bs) {
      /* recycled states must be scrubbed before reuse */
      zink_reset_batch_state(ctx, bs);
   } else {
      if (!batch->state) {
         /* this is batch init, so create a few more states for later use */
         for (int i = 0; i < 3; i++) {
            struct zink_batch_state *state = create_batch_state(ctx);
            util_dynarray_append(&ctx->free_batch_states, struct zink_batch_state *, state);
         }
      }
      bs = create_batch_state(ctx);
   }
   return bs;
}
302 
303 void
zink_reset_batch(struct zink_context * ctx,struct zink_batch * batch)304 zink_reset_batch(struct zink_context *ctx, struct zink_batch *batch)
305 {
306    batch->state = get_batch_state(ctx, batch);
307    assert(batch->state);
308 
309    batch->has_work = false;
310 }
311 
/* begin recording on the batch: resets the state, starts both cmdbufs,
 * and resumes any previously suspended queries
 */
void
zink_start_batch(struct zink_context *ctx, struct zink_batch *batch)
{
   zink_reset_batch(ctx, batch);

   /* marks this usage as pending until submit_queue() flips it */
   batch->state->usage.unflushed = true;

   VkCommandBufferBeginInfo cbbi = {0};
   cbbi.sType = VK_COMMAND_BUFFER_BEGIN_INFO_TYPE_PLACEHOLDER;
   cbbi.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;

   VkResult result = VKCTX(BeginCommandBuffer)(batch->state->cmdbuf, &cbbi);
   if (result != VK_SUCCESS)
      mesa_loge("ZINK: vkBeginCommandBuffer failed (%s)", vk_Result_to_str(result));

   result = VKCTX(BeginCommandBuffer)(batch->state->barrier_cmdbuf, &cbbi);
   if (result != VK_SUCCESS)
      mesa_loge("ZINK: vkBeginCommandBuffer failed (%s)", vk_Result_to_str(result));

   batch->state->fence.completed = false;
   /* remember the previous batch's usage so later code can order against it */
   if (ctx->last_fence) {
      struct zink_batch_state *last_state = zink_batch_state(ctx->last_fence);
      batch->last_batch_usage = &last_state->usage;
   }

   if (!ctx->queries_disabled)
      zink_resume_queries(ctx, batch);
}
340 
/* flush-queue completion callback (also called inline when unthreaded):
 * propagates device loss to the app and throttles contexts that queue
 * batches much faster than they complete
 */
static void
post_submit(void *data, void *gdata, int thread_index)
{
   struct zink_batch_state *bs = data;
   struct zink_screen *screen = zink_screen(bs->ctx->base.screen);

   if (bs->is_device_lost) {
      /* notify the app through its installed reset callback if there is one */
      if (bs->ctx->reset.reset)
         bs->ctx->reset.reset(bs->ctx->reset.data, PIPE_GUILTY_CONTEXT_RESET);
      else if (screen->abort_on_hang && !screen->robust_ctx_count)
         /* if nothing can save us, abort */
         abort();
      screen->device_lost = true;
   } else if (bs->ctx->batch_states_count > 5000) {
      /* throttle: block until the backlog is roughly halved */
      zink_screen_timeline_wait(screen, bs->fence.batch_id - 2500, PIPE_TIMEOUT_INFINITE);
   }
}
358 
/* flush-queue job (also called directly when unthreaded): assigns the
 * batch a timeline id, ends the cmdbufs, flushes non-coherent persistent
 * maps, and submits everything to the gfx queue
 */
static void
submit_queue(void *data, void *gdata, int thread_index)
{
   struct zink_batch_state *bs = data;
   struct zink_context *ctx = bs->ctx;
   struct zink_screen *screen = zink_screen(ctx->base.screen);
   VkSubmitInfo si[2] = {0};
   int num_si = 2;
   /* grab a nonzero monotonically-increasing timeline id for this batch */
   while (!bs->fence.batch_id)
      bs->fence.batch_id = (uint32_t)p_atomic_inc_return(&screen->curr_batch);
   bs->usage.usage = bs->fence.batch_id;
   bs->usage.unflushed = false;

   uint64_t batch_id = bs->fence.batch_id;
   /* first submit is just for acquire waits since they have a separate array */
   si[0].sType = si[1].sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
   si[0].waitSemaphoreCount = util_dynarray_num_elements(&bs->acquires, VkSemaphore);
   si[0].pWaitSemaphores = bs->acquires.data;
   /* top up the stage-mask array to match the semaphore count */
   while (util_dynarray_num_elements(&bs->acquire_flags, VkPipelineStageFlags) < si[0].waitSemaphoreCount) {
      VkPipelineStageFlags mask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
      util_dynarray_append(&bs->acquire_flags, VkPipelineStageFlags, mask);
   }
   assert(util_dynarray_num_elements(&bs->acquires, VkSemaphore) <= util_dynarray_num_elements(&bs->acquire_flags, VkPipelineStageFlags));
   si[0].pWaitDstStageMask = bs->acquire_flags.data;

   /* drop the acquire-only submit entirely when there is nothing to wait on */
   if (si[0].waitSemaphoreCount == 0)
     num_si--;

   /* then the real submit */
   si[1].waitSemaphoreCount = util_dynarray_num_elements(&bs->wait_semaphores, VkSemaphore);
   si[1].pWaitSemaphores = bs->wait_semaphores.data;
   si[1].pWaitDstStageMask = bs->wait_semaphore_stages.data;
   /* barrier cmdbuf (when used) executes before the main cmdbuf */
   si[1].commandBufferCount = bs->has_barriers ? 2 : 1;
   VkCommandBuffer cmdbufs[2] = {
      bs->barrier_cmdbuf,
      bs->cmdbuf,
   };
   si[1].pCommandBuffers = bs->has_barriers ? cmdbufs : &cmdbufs[1];

   VkSemaphore signals[3];
   si[1].signalSemaphoreCount = !!bs->signal_semaphore;
   signals[0] = bs->signal_semaphore;
   si[1].pSignalSemaphores = signals;
   VkTimelineSemaphoreSubmitInfo tsi = {0};
   uint64_t signal_values[2] = {0};
   tsi.sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO;
   si[1].pNext = &tsi;
   tsi.pSignalSemaphoreValues = signal_values;
   /* the screen's timeline semaphore is signaled with this batch's id */
   signal_values[si[1].signalSemaphoreCount] = batch_id;
   signals[si[1].signalSemaphoreCount++] = screen->sem;
   tsi.signalSemaphoreValueCount = si[1].signalSemaphoreCount;

   if (bs->present)
      signals[si[1].signalSemaphoreCount++] = bs->present;
   /* NOTE(review): with both signal_semaphore and present set,
    * signalSemaphoreCount reaches 3 while signal_values holds only 2
    * entries — confirm values for binary semaphores are ignored here, or
    * widen signal_values to match signals[]
    */
   tsi.signalSemaphoreValueCount = si[1].signalSemaphoreCount;

   VkResult result = VKSCR(EndCommandBuffer)(bs->cmdbuf);
   if (result != VK_SUCCESS) {
      mesa_loge("ZINK: vkEndCommandBuffer failed (%s)", vk_Result_to_str(result));
      bs->is_device_lost = true;
      goto end;
   }
   if (bs->has_barriers) {
      result = VKSCR(EndCommandBuffer)(bs->barrier_cmdbuf);
      if (result != VK_SUCCESS) {
         mesa_loge("ZINK: vkEndCommandBuffer failed (%s)", vk_Result_to_str(result));
         bs->is_device_lost = true;
         goto end;
      }
   }

   /* non-coherent persistently-mapped memory must be flushed before submit */
   while (util_dynarray_contains(&bs->persistent_resources, struct zink_resource_object*)) {
      struct zink_resource_object *obj = util_dynarray_pop(&bs->persistent_resources, struct zink_resource_object*);
       VkMappedMemoryRange range = zink_resource_init_mem_range(screen, obj, 0, obj->size);

       result = VKSCR(FlushMappedMemoryRanges)(screen->dev, 1, &range);
       if (result != VK_SUCCESS) {
          mesa_loge("ZINK: vkFlushMappedMemoryRanges failed (%s)", vk_Result_to_str(result));
       }
   }

   /* queue access is shared across contexts/threads */
   simple_mtx_lock(&screen->queue_lock);
   result = VKSCR(QueueSubmit)(screen->queue, num_si, num_si == 2 ? si : &si[1], VK_NULL_HANDLE);
   if (result != VK_SUCCESS) {
      mesa_loge("ZINK: vkQueueSubmit failed (%s)", vk_Result_to_str(result));
      bs->is_device_lost = true;
   }
   simple_mtx_unlock(&screen->queue_lock);
   bs->submit_count++;
end:
   /* wake anyone blocked in zink_batch_usage_wait() on this usage */
   cnd_broadcast(&bs->usage.flush);

   p_atomic_set(&bs->fence.submitted, true);
   unref_resources(screen, bs);
}
454 
/* finish the current batch: suspend queries, recycle completed in-flight
 * states, link this state into the in-flight list, set up presentation,
 * and hand the state to the flush queue (or submit inline if unthreaded)
 */
void
zink_end_batch(struct zink_context *ctx, struct zink_batch *batch)
{
   if (!ctx->queries_disabled)
      zink_suspend_queries(ctx, batch);

   tc_driver_internal_flush_notify(ctx->tc);

   struct zink_screen *screen = zink_screen(ctx->base.screen);
   struct zink_batch_state *bs;

   /* on oom or a deep backlog, recycle every state that has finished */
   if (ctx->oom_flush || ctx->batch_states_count > 10) {
      assert(!ctx->batch_states_count || ctx->batch_states);
      while (ctx->batch_states) {
         bs = ctx->batch_states;
         struct zink_fence *fence = &bs->fence;
         /* once an incomplete state is reached, no more will be complete */
         if (!zink_check_batch_completion(ctx, fence->batch_id))
            break;

         pop_batch_state(ctx);
         zink_reset_batch_state(ctx, bs);
         util_dynarray_append(&ctx->free_batch_states, struct zink_batch_state *, bs);
      }
      /* still deeply backlogged: keep flushing aggressively */
      if (ctx->batch_states_count > 50)
         ctx->oom_flush = true;
   }

   /* append this state to the tail of the in-flight list */
   bs = batch->state;
   if (ctx->last_fence)
      zink_batch_state(ctx->last_fence)->next = bs;
   else {
      assert(!ctx->batch_states);
      ctx->batch_states = bs;
   }
   ctx->last_fence = &bs->fence;
   ctx->batch_states_count++;
   batch->work_count = 0;

   /* if a swapchain image was used, set up its present semaphore for submit */
   if (batch->swapchain) {
      if (zink_kopper_acquired(batch->swapchain->obj->dt, batch->swapchain->obj->dt_idx) && !batch->swapchain->obj->present) {
         batch->state->present = zink_kopper_present(screen, batch->swapchain);
         batch->state->swapchain = batch->swapchain;
      }
      batch->swapchain = NULL;
   }

   if (screen->device_lost)
      return;

   if (screen->threaded) {
      util_queue_add_job(&screen->flush_queue, bs, &bs->flush_completed,
                         submit_queue, post_submit, 0);
   } else {
      submit_queue(bs, NULL, 0);
      post_submit(bs, NULL, 0);
   }
}
513 
/* mark 'res' as used (read or write) by the current batch; also queues
 * swapchain acquire semaphores and tracks non-coherent persistent maps
 * that must be flushed at submit time
 */
void
zink_batch_resource_usage_set(struct zink_batch *batch, struct zink_resource *res, bool write)
{
   if (res->obj->dt) {
      /* swapchain image: its acquire semaphore must be waited at submit */
      VkSemaphore acquire = zink_kopper_acquire_submit(zink_screen(batch->state->ctx->base.screen), res);
      if (acquire)
         util_dynarray_append(&batch->state->acquires, VkSemaphore, acquire);
   }
   if (write && !res->obj->is_buffer) {
      /* first write makes the image valid; bound fb attachments may need
       * their loadop re-evaluated
       */
      if (!res->valid && res->fb_binds)
         batch->state->ctx->rp_loadop_changed = true;
      res->valid = true;
   }
   zink_resource_usage_set(res, batch->state, write);
   /* multiple array entries are fine */
   if (!res->obj->coherent && res->obj->persistent_maps)
      util_dynarray_append(&batch->state->persistent_resources, struct zink_resource_object*, res->obj);

   batch->has_work = true;
}
534 
535 void
zink_batch_reference_resource_rw(struct zink_batch * batch,struct zink_resource * res,bool write)536 zink_batch_reference_resource_rw(struct zink_batch *batch, struct zink_resource *res, bool write)
537 {
538    /* if the resource already has usage of any sort set for this batch, */
539    if (!zink_resource_usage_matches(res, batch->state) ||
540        /* or if it's bound somewhere */
541        !zink_resource_has_binds(res))
542       /* then it already has a batch ref and doesn't need one here */
543       zink_batch_reference_resource(batch, res);
544    zink_batch_resource_usage_set(batch, res, write);
545 }
546 
547 void
zink_batch_add_wait_semaphore(struct zink_batch * batch,VkSemaphore sem)548 zink_batch_add_wait_semaphore(struct zink_batch *batch, VkSemaphore sem)
549 {
550    util_dynarray_append(&batch->state->acquires, VkSemaphore, sem);
551 }
552 
553 bool
batch_ptr_add_usage(struct zink_batch * batch,struct set * s,void * ptr)554 batch_ptr_add_usage(struct zink_batch *batch, struct set *s, void *ptr)
555 {
556    bool found = false;
557    _mesa_set_search_or_add(s, ptr, &found);
558    return !found;
559 }
560 
561 ALWAYS_INLINE static void
check_oom_flush(struct zink_context * ctx,const struct zink_batch * batch)562 check_oom_flush(struct zink_context *ctx, const struct zink_batch *batch)
563 {
564    const VkDeviceSize resource_size = batch->state->resource_size;
565    if (resource_size >= zink_screen(ctx->base.screen)->clamp_video_mem) {
566        ctx->oom_flush = true;
567        ctx->oom_stall = true;
568     }
569 }
570 
571 void
zink_batch_reference_resource(struct zink_batch * batch,struct zink_resource * res)572 zink_batch_reference_resource(struct zink_batch *batch, struct zink_resource *res)
573 {
574    if (!batch_ptr_add_usage(batch, batch->state->resources, res->obj))
575       return;
576    pipe_reference(NULL, &res->obj->reference);
577    batch->state->resource_size += res->obj->size;
578    check_oom_flush(batch->state->ctx, batch);
579    batch->has_work = true;
580 }
581 
582 void
zink_batch_reference_resource_move(struct zink_batch * batch,struct zink_resource * res)583 zink_batch_reference_resource_move(struct zink_batch *batch, struct zink_resource *res)
584 {
585    if (!batch_ptr_add_usage(batch, batch->state->resources, res->obj))
586       return;
587    batch->state->resource_size += res->obj->size;
588    check_oom_flush(batch->state->ctx, batch);
589    batch->has_work = true;
590 }
591 
592 void
zink_batch_reference_bufferview(struct zink_batch * batch,struct zink_buffer_view * buffer_view)593 zink_batch_reference_bufferview(struct zink_batch *batch, struct zink_buffer_view *buffer_view)
594 {
595    if (!batch_ptr_add_usage(batch, batch->state->bufferviews, buffer_view))
596       return;
597    pipe_reference(NULL, &buffer_view->reference);
598    batch->has_work = true;
599 }
600 
601 void
zink_batch_reference_surface(struct zink_batch * batch,struct zink_surface * surface)602 zink_batch_reference_surface(struct zink_batch *batch, struct zink_surface *surface)
603 {
604    if (!batch_ptr_add_usage(batch, batch->state->surfaces, surface))
605       return;
606    struct pipe_surface *surf = NULL;
607    pipe_surface_reference(&surf, &surface->base);
608    batch->has_work = true;
609 }
610 
611 void
zink_batch_reference_sampler_view(struct zink_batch * batch,struct zink_sampler_view * sv)612 zink_batch_reference_sampler_view(struct zink_batch *batch,
613                                   struct zink_sampler_view *sv)
614 {
615    if (sv->base.target == PIPE_BUFFER)
616       zink_batch_reference_bufferview(batch, sv->buffer_view);
617    else {
618       zink_batch_reference_surface(batch, sv->image_view);
619       if (sv->cube_array)
620          zink_batch_reference_surface(batch, sv->cube_array);
621    }
622 }
623 
624 void
zink_batch_reference_program(struct zink_batch * batch,struct zink_program * pg)625 zink_batch_reference_program(struct zink_batch *batch,
626                              struct zink_program *pg)
627 {
628    if (zink_batch_usage_matches(pg->batch_uses, batch->state) ||
629        !batch_ptr_add_usage(batch, batch->state->programs, pg))
630       return;
631    pipe_reference(NULL, &pg->reference);
632    zink_batch_usage_set(&pg->batch_uses, batch->state);
633    batch->has_work = true;
634 }
635 
636 void
zink_batch_reference_image_view(struct zink_batch * batch,struct zink_image_view * image_view)637 zink_batch_reference_image_view(struct zink_batch *batch,
638                                 struct zink_image_view *image_view)
639 {
640    if (image_view->base.resource->target == PIPE_BUFFER)
641       zink_batch_reference_bufferview(batch, image_view->buffer_view);
642    else
643       zink_batch_reference_surface(batch, image_view->surface);
644 }
645 
646 bool
zink_screen_usage_check_completion(struct zink_screen * screen,const struct zink_batch_usage * u)647 zink_screen_usage_check_completion(struct zink_screen *screen, const struct zink_batch_usage *u)
648 {
649    if (!zink_batch_usage_exists(u))
650       return true;
651    if (zink_batch_usage_is_unflushed(u))
652       return false;
653 
654    return zink_screen_timeline_wait(screen, u->usage, 0);
655 }
656 
657 bool
zink_batch_usage_check_completion(struct zink_context * ctx,const struct zink_batch_usage * u)658 zink_batch_usage_check_completion(struct zink_context *ctx, const struct zink_batch_usage *u)
659 {
660    if (!zink_batch_usage_exists(u))
661       return true;
662    if (zink_batch_usage_is_unflushed(u))
663       return false;
664    return zink_check_batch_completion(ctx, u->usage);
665 }
666 
/* block until all work tracked by 'u' has completed, flushing first if
 * the usage is still unflushed
 */
void
zink_batch_usage_wait(struct zink_context *ctx, struct zink_batch_usage *u)
{
   if (!zink_batch_usage_exists(u))
      return;
   if (zink_batch_usage_is_unflushed(u)) {
      /* our own current batch: flush it directly */
      if (likely(u == &ctx->batch.state->usage))
         ctx->base.flush(&ctx->base, NULL, PIPE_FLUSH_HINT_FINISH);
      else { //multi-context
         /* another context owns this usage: wait for its submit_queue()
          * to broadcast u->flush
          */
         mtx_lock(&u->mtx);
         cnd_wait(&u->flush, &u->mtx);
         mtx_unlock(&u->mtx);
      }
   }
   zink_wait_on_batch(ctx, u->usage);
}
683