/*
 * Copyright © 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#include "u_queue.h"

#include "c11/threads.h"
#include "util/u_cpu_detect.h"
#include "util/os_time.h"
#include "util/u_string.h"
#include "util/u_thread.h"
#include "u_process.h"

#if defined(__linux__)
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/syscall.h>
#endif


/* 256MB cap on the total size of queued jobs; past this,
 * UTIL_QUEUE_INIT_RESIZE_IF_FULL stops growing the job ring. */
#define S_256MB (256 * 1024 * 1024)

static void
util_queue_kill_threads(struct util_queue *queue, unsigned keep_num_threads,
                        bool finish_locked);

/****************************************************************************
 * Wait for all queues to assert idle when exit() is called.
 *
 * Otherwise, C++ static variable destructors can be called while threads
 * are using the static variables.
 */

static once_flag atexit_once_flag = ONCE_FLAG_INIT;
static struct list_head queue_list;
static mtx_t exit_mutex = _MTX_INITIALIZER_NP;

static void
atexit_handler(void)
{
   struct util_queue *iter;

   mtx_lock(&exit_mutex);
   /* Wait for all queues to assert idle. */
   LIST_FOR_EACH_ENTRY(iter, &queue_list, head) {
      util_queue_kill_threads(iter, 0, false);
   }
   mtx_unlock(&exit_mutex);
}

static void
global_init(void)
{
   list_inithead(&queue_list);
   atexit(atexit_handler);
}

static void
add_to_atexit_list(struct util_queue *queue)
{
   call_once(&atexit_once_flag, global_init);

   mtx_lock(&exit_mutex);
   list_add(&queue->head, &queue_list);
   mtx_unlock(&exit_mutex);
}

static void
remove_from_atexit_list(struct util_queue *queue)
{
   struct util_queue *iter, *tmp;

   mtx_lock(&exit_mutex);
   LIST_FOR_EACH_ENTRY_SAFE(iter, tmp, &queue_list, head) {
      if (iter == queue) {
         list_del(&iter->head);
         break;
      }
   }
   mtx_unlock(&exit_mutex);
}

/****************************************************************************
 * util_queue_fence
 */

#ifdef UTIL_QUEUE_FENCE_FUTEX
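/* Fence value protocol, as used by the wait loop below:
 * 0 = signalled, 1 = unsignalled, 2 = unsignalled with waiters sleeping
 * on the futex. Waiters promote 1 -> 2 before sleeping.
 */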
static bool
do_futex_fence_wait(struct util_queue_fence *fence,
                    bool timeout, int64_t abs_timeout)
{
   uint32_t v = p_atomic_read_relaxed(&fence->val);
   struct timespec ts;
   ts.tv_sec = abs_timeout / (1000*1000*1000);
   ts.tv_nsec = abs_timeout % (1000*1000*1000);

   while (v != 0) {
      if (v != 2) {
         v = p_atomic_cmpxchg(&fence->val, 1, 2);
         if (v == 0)
            return true;
      }

      int r = futex_wait(&fence->val, 2, timeout ? &ts : NULL);
      if (timeout && r < 0) {
         if (errno == ETIMEDOUT)
            return false;
      }

      v = p_atomic_read_relaxed(&fence->val);
   }

   return true;
}

void
_util_queue_fence_wait(struct util_queue_fence *fence)
{
   do_futex_fence_wait(fence, false, 0);
}

bool
_util_queue_fence_wait_timeout(struct util_queue_fence *fence,
                               int64_t abs_timeout)
{
   return do_futex_fence_wait(fence, true, abs_timeout);
}

#endif

#ifdef UTIL_QUEUE_FENCE_STANDARD
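/* Fallback implementation: the fence is a boolean protected by a mutex and
 * signalled through a condition variable.
 */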
void
util_queue_fence_signal(struct util_queue_fence *fence)
{
   mtx_lock(&fence->mutex);
   fence->signalled = true;
   cnd_broadcast(&fence->cond);
   mtx_unlock(&fence->mutex);
}

void
_util_queue_fence_wait(struct util_queue_fence *fence)
{
   mtx_lock(&fence->mutex);
   while (!fence->signalled)
      cnd_wait(&fence->cond, &fence->mutex);
   mtx_unlock(&fence->mutex);
}

bool
_util_queue_fence_wait_timeout(struct util_queue_fence *fence,
                               int64_t abs_timeout)
{
   /* This terrible hack is made necessary by the fact that we really want an
    * internal interface consistent with os_time_*, but cnd_timedwait is spec'd
    * to be relative to the TIME_UTC clock.
    */
   int64_t rel = abs_timeout - os_time_get_nano();

   if (rel > 0) {
      struct timespec ts;

#if defined(HAVE_TIMESPEC_GET) || defined(_WIN32)
      timespec_get(&ts, TIME_UTC);
#else
      clock_gettime(CLOCK_REALTIME, &ts);
#endif

      /* Add the remaining time (not the absolute os_time timestamp) to the
       * current TIME_UTC time to get the deadline for cnd_timedwait.
       */
      ts.tv_sec += rel / (1000*1000*1000);
      ts.tv_nsec += rel % (1000*1000*1000);
      if (ts.tv_nsec >= (1000*1000*1000)) {
         ts.tv_sec++;
         ts.tv_nsec -= (1000*1000*1000);
      }

      mtx_lock(&fence->mutex);
      while (!fence->signalled) {
         if (cnd_timedwait(&fence->cond, &fence->mutex, &ts) != thrd_success)
            break;
      }
      mtx_unlock(&fence->mutex);
   }

   return fence->signalled;
}

void
util_queue_fence_init(struct util_queue_fence *fence)
{
   memset(fence, 0, sizeof(*fence));
   (void) mtx_init(&fence->mutex, mtx_plain);
   cnd_init(&fence->cond);
   fence->signalled = true;
}

void
util_queue_fence_destroy(struct util_queue_fence *fence)
{
   assert(fence->signalled);

   /* Ensure that another thread is not in the middle of
    * util_queue_fence_signal (having set the fence to signalled but still
    * holding the fence mutex).
    *
    * A common contract between threads is that as soon as a fence is signalled
    * by thread A, thread B is allowed to destroy it. Since
    * util_queue_fence_is_signalled does not lock the fence mutex (for
    * performance reasons), we must do so here.
    */
   mtx_lock(&fence->mutex);
   mtx_unlock(&fence->mutex);

   cnd_destroy(&fence->cond);
   mtx_destroy(&fence->mutex);
}
#endif

/****************************************************************************
 * util_queue implementation
 */

struct thread_input {
   struct util_queue *queue;
   int thread_index;
};

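/* Worker thread entry point: takes ownership of the heap-allocated
 * thread_input, then loops popping jobs from the ring buffer until its
 * index is no longer below queue->num_threads.
 */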
static int
util_queue_thread_func(void *input)
{
   struct util_queue *queue = ((struct thread_input*)input)->queue;
   int thread_index = ((struct thread_input*)input)->thread_index;

   free(input);

   if (queue->flags & UTIL_QUEUE_INIT_SET_FULL_THREAD_AFFINITY) {
      /* Don't inherit the thread affinity from the parent thread.
       * Set the full mask.
       */
      uint32_t mask[UTIL_MAX_CPUS / 32];

      memset(mask, 0xff, sizeof(mask));

      /* Ensure util_cpu_caps.num_cpu_mask_bits is initialized: */
      util_cpu_detect();

      util_set_current_thread_affinity(mask, NULL,
                                       util_get_cpu_caps()->num_cpu_mask_bits);
   }

#if defined(__linux__)
   if (queue->flags & UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY) {
      /* The nice() function can only set a maximum of 19. */
      setpriority(PRIO_PROCESS, syscall(SYS_gettid), 19);
   }
#endif

   if (strlen(queue->name) > 0) {
      char name[16];
      snprintf(name, sizeof(name), "%s%i", queue->name, thread_index);
      u_thread_setname(name);
   }

   while (1) {
      struct util_queue_job job;

      mtx_lock(&queue->lock);
      assert(queue->num_queued >= 0 && queue->num_queued <= queue->max_jobs);

      /* wait if the queue is empty */
      while (thread_index < queue->num_threads && queue->num_queued == 0)
         cnd_wait(&queue->has_queued_cond, &queue->lock);

      /* only kill threads that are above "num_threads" */
      if (thread_index >= queue->num_threads) {
         mtx_unlock(&queue->lock);
         break;
      }

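      /* Pop the next job from the ring buffer and free its slot. */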
      job = queue->jobs[queue->read_idx];
      memset(&queue->jobs[queue->read_idx], 0, sizeof(struct util_queue_job));
      queue->read_idx = (queue->read_idx + 1) % queue->max_jobs;

      queue->num_queued--;
      cnd_signal(&queue->has_space_cond);
      if (job.job)
         queue->total_jobs_size -= job.job_size;
      mtx_unlock(&queue->lock);

      if (job.job) {
         job.execute(job.job, job.global_data, thread_index);
         if (job.fence)
            util_queue_fence_signal(job.fence);
         if (job.cleanup)
            job.cleanup(job.job, job.global_data, thread_index);
      }
   }

   /* signal remaining jobs if all threads are being terminated */
   mtx_lock(&queue->lock);
   if (queue->num_threads == 0) {
      for (unsigned i = queue->read_idx; i != queue->write_idx;
           i = (i + 1) % queue->max_jobs) {
         if (queue->jobs[i].job) {
            if (queue->jobs[i].fence)
               util_queue_fence_signal(queue->jobs[i].fence);
            queue->jobs[i].job = NULL;
         }
      }
      queue->read_idx = queue->write_idx;
      queue->num_queued = 0;
   }
   mtx_unlock(&queue->lock);
   return 0;
}

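/* Spawn one worker thread. The thread_input is freed by the worker on
 * success, or here on failure.
 */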
static bool
util_queue_create_thread(struct util_queue *queue, unsigned index)
{
   struct thread_input *input =
      (struct thread_input *) malloc(sizeof(struct thread_input));
   input->queue = queue;
   input->thread_index = index;

   queue->threads[index] = u_thread_create(util_queue_thread_func, input);

   if (!queue->threads[index]) {
      free(input);
      return false;
   }

   if (queue->flags & UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY) {
#if defined(__linux__) && defined(SCHED_BATCH)
      struct sched_param sched_param = {0};

      /* The nice() function can only set a maximum of 19.
       * SCHED_BATCH gives the scheduler a hint that this is a latency
       * insensitive thread.
       *
       * Note that Linux only allows decreasing the priority. The original
       * priority can't be restored.
       */
      pthread_setschedparam(queue->threads[index], SCHED_BATCH, &sched_param);
#endif
   }
   return true;
}

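/* Grow or shrink the number of worker threads at runtime, clamped to
 * [1, queue->max_threads].
 */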
void
util_queue_adjust_num_threads(struct util_queue *queue, unsigned num_threads)
{
   num_threads = MIN2(num_threads, queue->max_threads);
   num_threads = MAX2(num_threads, 1);

   simple_mtx_lock(&queue->finish_lock);
   unsigned old_num_threads = queue->num_threads;

   if (num_threads == old_num_threads) {
      simple_mtx_unlock(&queue->finish_lock);
      return;
   }

   if (num_threads < old_num_threads) {
      util_queue_kill_threads(queue, num_threads, true);
      simple_mtx_unlock(&queue->finish_lock);
      return;
   }

   /* Create threads.
    *
    * We need to update num_threads first, because threads terminate
    * when thread_index >= num_threads.
    */
   queue->num_threads = num_threads;
   for (unsigned i = old_num_threads; i < num_threads; i++) {
      if (!util_queue_create_thread(queue, i))
         break;
   }
   simple_mtx_unlock(&queue->finish_lock);
}

bool
util_queue_init(struct util_queue *queue,
                const char *name,
                unsigned max_jobs,
                unsigned num_threads,
                unsigned flags,
                void *global_data)
{
   unsigned i;

   /* Form the thread name from process_name and name, limited to 13
    * characters. Characters 14-15 are reserved for the thread number.
    * Character 16 should be 0. Final form: "process:name12"
    *
    * If name is too long, it's truncated. If any space is left, the process
    * name fills it.
    */
   const char *process_name = util_get_process_name();
   int process_len = process_name ? strlen(process_name) : 0;
   int name_len = strlen(name);
   const int max_chars = sizeof(queue->name) - 1;

   name_len = MIN2(name_len, max_chars);

   /* See if there is any space left for the process name, reserve 1 for
    * the colon. */
   process_len = MIN2(process_len, max_chars - name_len - 1);
   process_len = MAX2(process_len, 0);

   memset(queue, 0, sizeof(*queue));

   if (process_len) {
      snprintf(queue->name, sizeof(queue->name), "%.*s:%s",
               process_len, process_name, name);
   } else {
      snprintf(queue->name, sizeof(queue->name), "%s", name);
   }

   queue->flags = flags;
   queue->max_threads = num_threads;
   queue->num_threads = (flags & UTIL_QUEUE_INIT_SCALE_THREADS) ? 1 : num_threads;
   queue->max_jobs = max_jobs;
   queue->global_data = global_data;

   (void) mtx_init(&queue->lock, mtx_plain);
   (void) simple_mtx_init(&queue->finish_lock, mtx_plain);

   queue->num_queued = 0;
   cnd_init(&queue->has_queued_cond);
   cnd_init(&queue->has_space_cond);

   queue->jobs = (struct util_queue_job*)
                 calloc(max_jobs, sizeof(struct util_queue_job));
   if (!queue->jobs)
      goto fail;

   queue->threads = (thrd_t*) calloc(queue->max_threads, sizeof(thrd_t));
   if (!queue->threads)
      goto fail;

   /* start threads */
   for (i = 0; i < queue->num_threads; i++) {
      if (!util_queue_create_thread(queue, i)) {
         if (i == 0) {
            /* no threads created, fail */
            goto fail;
         } else {
            /* at least one thread created, so use it */
            queue->num_threads = i;
            break;
         }
      }
   }

   add_to_atexit_list(queue);
   return true;

fail:
   free(queue->threads);

   if (queue->jobs) {
      cnd_destroy(&queue->has_space_cond);
      cnd_destroy(&queue->has_queued_cond);
      mtx_destroy(&queue->lock);
      free(queue->jobs);
   }
   /* also util_queue_is_initialized can be used to check for success */
   memset(queue, 0, sizeof(*queue));
   return false;
}

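/* Terminate and join every worker whose index is >= keep_num_threads.
 * With finish_locked == true the caller already holds finish_lock.
 */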
static void
util_queue_kill_threads(struct util_queue *queue, unsigned keep_num_threads,
                        bool finish_locked)
{
   unsigned i;

   /* Signal all threads to terminate. */
   if (!finish_locked)
      simple_mtx_lock(&queue->finish_lock);

   if (keep_num_threads >= queue->num_threads) {
      simple_mtx_unlock(&queue->finish_lock);
      return;
   }

   mtx_lock(&queue->lock);
   unsigned old_num_threads = queue->num_threads;
   /* Setting num_threads is what causes the threads to terminate.
    * Then cnd_broadcast wakes them up and they will exit their function.
    */
   queue->num_threads = keep_num_threads;
   cnd_broadcast(&queue->has_queued_cond);
   mtx_unlock(&queue->lock);

   for (i = keep_num_threads; i < old_num_threads; i++)
      thrd_join(queue->threads[i], NULL);

   if (!finish_locked)
      simple_mtx_unlock(&queue->finish_lock);
}

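/* Dummy job used by util_queue_finish(): each worker blocks on the shared
 * barrier until every worker has reached it.
 */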
static void
util_queue_finish_execute(void *data, void *gdata, int num_thread)
{
   util_barrier *barrier = data;
   util_barrier_wait(barrier);
}

void
util_queue_destroy(struct util_queue *queue)
{
   util_queue_kill_threads(queue, 0, false);

   /* This makes it safe to call on a queue that failed util_queue_init(). */
   if (queue->head.next != NULL)
      remove_from_atexit_list(queue);

   cnd_destroy(&queue->has_space_cond);
   cnd_destroy(&queue->has_queued_cond);
   simple_mtx_destroy(&queue->finish_lock);
   mtx_destroy(&queue->lock);
   free(queue->jobs);
   free(queue->threads);
}

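/* Add a job to the queue. Blocks while the ring buffer is full, unless
 * UTIL_QUEUE_INIT_RESIZE_IF_FULL allows growing it instead.
 */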
void
util_queue_add_job(struct util_queue *queue,
                   void *job,
                   struct util_queue_fence *fence,
                   util_queue_execute_func execute,
                   util_queue_execute_func cleanup,
                   const size_t job_size)
{
   struct util_queue_job *ptr;

   mtx_lock(&queue->lock);
   if (queue->num_threads == 0) {
      mtx_unlock(&queue->lock);
      /* There is no good option here, but any leaks will be short-lived
       * because things are shutting down.
       */
      return;
   }

   if (fence)
      util_queue_fence_reset(fence);

   assert(queue->num_queued >= 0 && queue->num_queued <= queue->max_jobs);

   if (queue->num_queued == queue->max_jobs) {
      if ((queue->flags & UTIL_QUEUE_INIT_SCALE_THREADS) &&
          execute != util_queue_finish_execute &&
          queue->num_threads < queue->max_threads) {
         util_queue_adjust_num_threads(queue, queue->num_threads + 1);
      }

      if (queue->flags & UTIL_QUEUE_INIT_RESIZE_IF_FULL &&
          queue->total_jobs_size + job_size < S_256MB) {
         /* If the queue is full, make it larger to avoid waiting for a free
          * slot.
          */
         unsigned new_max_jobs = queue->max_jobs + 8;
         struct util_queue_job *jobs =
            (struct util_queue_job*)calloc(new_max_jobs,
                                           sizeof(struct util_queue_job));
         assert(jobs);

         /* Copy all queued jobs into the new list. */
         unsigned num_jobs = 0;
         unsigned i = queue->read_idx;

         do {
            jobs[num_jobs++] = queue->jobs[i];
            i = (i + 1) % queue->max_jobs;
         } while (i != queue->write_idx);

         assert(num_jobs == queue->num_queued);

         free(queue->jobs);
         queue->jobs = jobs;
         queue->read_idx = 0;
         queue->write_idx = num_jobs;
         queue->max_jobs = new_max_jobs;
      } else {
         /* Wait until there is a free slot. */
         while (queue->num_queued == queue->max_jobs)
            cnd_wait(&queue->has_space_cond, &queue->lock);
      }
   }

   ptr = &queue->jobs[queue->write_idx];
   assert(ptr->job == NULL);
   ptr->job = job;
   ptr->global_data = queue->global_data;
   ptr->fence = fence;
   ptr->execute = execute;
   ptr->cleanup = cleanup;
   ptr->job_size = job_size;

   queue->write_idx = (queue->write_idx + 1) % queue->max_jobs;
   queue->total_jobs_size += ptr->job_size;

   queue->num_queued++;
   cnd_signal(&queue->has_queued_cond);
   mtx_unlock(&queue->lock);
}

/**
 * Remove a queued job. If the job hasn't started execution, it's removed from
 * the queue. If the job has started execution, the function waits for it to
 * complete.
 *
 * In all cases, the fence is signalled when the function returns.
 *
 * The function can be used when destroying an object associated with the job
 * when you don't care about the job completion state.
 */
void
util_queue_drop_job(struct util_queue *queue, struct util_queue_fence *fence)
{
   bool removed = false;

   if (util_queue_fence_is_signalled(fence))
      return;

   mtx_lock(&queue->lock);
   for (unsigned i = queue->read_idx; i != queue->write_idx;
        i = (i + 1) % queue->max_jobs) {
      if (queue->jobs[i].fence == fence) {
         if (queue->jobs[i].cleanup)
            queue->jobs[i].cleanup(queue->jobs[i].job, queue->global_data, -1);

         /* Just clear it. The threads will treat it as a no-op job. */
         memset(&queue->jobs[i], 0, sizeof(queue->jobs[i]));
         removed = true;
         break;
      }
   }
   mtx_unlock(&queue->lock);

   if (removed)
      util_queue_fence_signal(fence);
   else
      util_queue_fence_wait(fence);
}

/**
 * Wait until all previously added jobs have completed.
 */
void
util_queue_finish(struct util_queue *queue)
{
   util_barrier barrier;
   struct util_queue_fence *fences;

   /* If 2 threads were adding jobs for 2 different barriers at the same time,
    * a deadlock would happen, because 1 barrier requires that all threads
    * wait for it exclusively.
    */
   simple_mtx_lock(&queue->finish_lock);

   /* The number of threads can be changed to 0, e.g. by the atexit handler. */
   if (!queue->num_threads) {
      simple_mtx_unlock(&queue->finish_lock);
      return;
   }

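   /* Submit one barrier job per worker; once every worker has entered
    * util_queue_finish_execute, all previously queued jobs have completed.
    */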
   fences = malloc(queue->num_threads * sizeof(*fences));
   util_barrier_init(&barrier, queue->num_threads);

   for (unsigned i = 0; i < queue->num_threads; ++i) {
      util_queue_fence_init(&fences[i]);
      util_queue_add_job(queue, &barrier, &fences[i],
                         util_queue_finish_execute, NULL, 0);
   }

   for (unsigned i = 0; i < queue->num_threads; ++i) {
      util_queue_fence_wait(&fences[i]);
      util_queue_fence_destroy(&fences[i]);
   }
   simple_mtx_unlock(&queue->finish_lock);

   util_barrier_destroy(&barrier);

   free(fences);
}

int64_t
util_queue_get_thread_time_nano(struct util_queue *queue, unsigned thread_index)
{
   /* Allow some flexibility by not raising an error. */
   if (thread_index >= queue->num_threads)
      return 0;

   return util_thread_get_time_nano(queue->threads[thread_index]);
}