Lines Matching full:queue

47 util_queue_kill_threads(struct util_queue *queue, unsigned keep_num_threads,
82 add_to_atexit_list(struct util_queue *queue) in add_to_atexit_list() argument
87 list_add(&queue->head, &queue_list); in add_to_atexit_list()
92 remove_from_atexit_list(struct util_queue *queue) in remove_from_atexit_list() argument
98 if (iter == queue) { in remove_from_atexit_list()
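
These two helpers keep every live queue on a global list so that an atexit
callback can stop all worker threads before process teardown. A minimal
sketch of that pattern, assuming Mesa's LIST_FOR_EACH_ENTRY macro from
util/list.h (the handler below is illustrative, not copied from this file):

    static void
    atexit_handler(void)
    {
       struct util_queue *iter;

       /* Stop the workers of every live queue so no thread outlives main(). */
       LIST_FOR_EACH_ENTRY(iter, &queue_list, head) {
          util_queue_kill_threads(iter, 0, false);
       }
    }
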
246 struct util_queue *queue; member
253 struct util_queue *queue = ((struct thread_input*)input)->queue; in util_queue_thread_func() local
258 if (queue->flags & UTIL_QUEUE_INIT_SET_FULL_THREAD_AFFINITY) { in util_queue_thread_func()
274 if (queue->flags & UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY) { in util_queue_thread_func()
280 if (strlen(queue->name) > 0) { in util_queue_thread_func()
282 snprintf(name, sizeof(name), "%s%i", queue->name, thread_index); in util_queue_thread_func()
289 mtx_lock(&queue->lock); in util_queue_thread_func()
290 assert(queue->num_queued >= 0 && queue->num_queued <= queue->max_jobs); in util_queue_thread_func()
292 /* wait if the queue is empty */ in util_queue_thread_func()
293 while (thread_index < queue->num_threads && queue->num_queued == 0) in util_queue_thread_func()
294 cnd_wait(&queue->has_queued_cond, &queue->lock); in util_queue_thread_func()
297 if (thread_index >= queue->num_threads) { in util_queue_thread_func()
298 mtx_unlock(&queue->lock); in util_queue_thread_func()
302 job = queue->jobs[queue->read_idx]; in util_queue_thread_func()
303 memset(&queue->jobs[queue->read_idx], 0, sizeof(struct util_queue_job)); in util_queue_thread_func()
304 queue->read_idx = (queue->read_idx + 1) % queue->max_jobs; in util_queue_thread_func()
306 queue->num_queued--; in util_queue_thread_func()
307 cnd_signal(&queue->has_space_cond); in util_queue_thread_func()
309 queue->total_jobs_size -= job.job_size; in util_queue_thread_func()
310 mtx_unlock(&queue->lock); in util_queue_thread_func()
322 mtx_lock(&queue->lock); in util_queue_thread_func()
323 if (queue->num_threads == 0) { in util_queue_thread_func()
324 for (unsigned i = queue->read_idx; i != queue->write_idx; in util_queue_thread_func()
325 i = (i + 1) % queue->max_jobs) { in util_queue_thread_func()
326 if (queue->jobs[i].job) { in util_queue_thread_func()
327 if (queue->jobs[i].fence) in util_queue_thread_func()
328 util_queue_fence_signal(queue->jobs[i].fence); in util_queue_thread_func()
329 queue->jobs[i].job = NULL; in util_queue_thread_func()
332 queue->read_idx = queue->write_idx; in util_queue_thread_func()
333 queue->num_queued = 0; in util_queue_thread_func()
335 mtx_unlock(&queue->lock); in util_queue_thread_func()
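
The worker loop above pops one job while holding queue->lock, signals
has_space_cond, and then runs the job's callbacks with the lock released.
The callback signature (void *job, void *gdata, int thread_index) is implied
by the cleanup call site later in this file; the struct and backend_compile
helper below are purely illustrative:

    struct compile_job {
       const char *source;  /* illustrative payload */
       void *binary;
    };

    static void
    compile_execute(void *job, void *gdata, int thread_index)
    {
       struct compile_job *cj = (struct compile_job *)job;

       /* Runs on a worker thread; queue->lock is NOT held here. */
       cj->binary = backend_compile(cj->source);  /* hypothetical helper */
    }
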
340 util_queue_create_thread(struct util_queue *queue, unsigned index) in util_queue_create_thread() argument
344 input->queue = queue; in util_queue_create_thread()
347 queue->threads[index] = u_thread_create(util_queue_thread_func, input); in util_queue_create_thread()
349 if (!queue->threads[index]) { in util_queue_create_thread()
354 if (queue->flags & UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY) { in util_queue_create_thread()
365 pthread_setschedparam(queue->threads[index], SCHED_BATCH, &sched_param); in util_queue_create_thread()
372 util_queue_adjust_num_threads(struct util_queue *queue, unsigned num_threads) in util_queue_adjust_num_threads() argument
374 num_threads = MIN2(num_threads, queue->max_threads); in util_queue_adjust_num_threads()
377 simple_mtx_lock(&queue->finish_lock); in util_queue_adjust_num_threads()
378 unsigned old_num_threads = queue->num_threads; in util_queue_adjust_num_threads()
381 simple_mtx_unlock(&queue->finish_lock); in util_queue_adjust_num_threads()
386 util_queue_kill_threads(queue, num_threads, true); in util_queue_adjust_num_threads()
387 simple_mtx_unlock(&queue->finish_lock); in util_queue_adjust_num_threads()
396 queue->num_threads = num_threads; in util_queue_adjust_num_threads()
398 if (!util_queue_create_thread(queue, i)) in util_queue_adjust_num_threads()
401 simple_mtx_unlock(&queue->finish_lock); in util_queue_adjust_num_threads()
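
Resizing takes finish_lock so it cannot race with util_queue_finish:
shrinking delegates to util_queue_kill_threads, while growing spawns the
missing workers one by one. Since the request is clamped to max_threads,
over-asking is harmless. A hedged usage sketch:

    /* Grow (or shrink) the worker pool at runtime; the count is clamped
     * to the max_threads passed to util_queue_init(). */
    util_queue_adjust_num_threads(&queue, 2);
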
405 util_queue_init(struct util_queue *queue, in util_queue_init() argument
424 const int max_chars = sizeof(queue->name) - 1; in util_queue_init()
433 memset(queue, 0, sizeof(*queue)); in util_queue_init()
436 snprintf(queue->name, sizeof(queue->name), "%.*s:%s", in util_queue_init()
439 snprintf(queue->name, sizeof(queue->name), "%s", name); in util_queue_init()
442 queue->flags = flags; in util_queue_init()
443 queue->max_threads = num_threads; in util_queue_init()
444 queue->num_threads = (flags & UTIL_QUEUE_INIT_SCALE_THREADS) ? 1 : num_threads; in util_queue_init()
445 queue->max_jobs = max_jobs; in util_queue_init()
446 queue->global_data = global_data; in util_queue_init()
448 (void) mtx_init(&queue->lock, mtx_plain); in util_queue_init()
449 (void) simple_mtx_init(&queue->finish_lock, mtx_plain); in util_queue_init()
451 queue->num_queued = 0; in util_queue_init()
452 cnd_init(&queue->has_queued_cond); in util_queue_init()
453 cnd_init(&queue->has_space_cond); in util_queue_init()
455 queue->jobs = (struct util_queue_job*) in util_queue_init()
457 if (!queue->jobs) in util_queue_init()
460 queue->threads = (thrd_t*) calloc(queue->max_threads, sizeof(thrd_t)); in util_queue_init()
461 if (!queue->threads) in util_queue_init()
465 for (i = 0; i < queue->num_threads; i++) { in util_queue_init()
466 if (!util_queue_create_thread(queue, i)) { in util_queue_init()
472 queue->num_threads = i; in util_queue_init()
478 add_to_atexit_list(queue); in util_queue_init()
482 free(queue->threads); in util_queue_init()
484 if (queue->jobs) { in util_queue_init()
485 cnd_destroy(&queue->has_space_cond); in util_queue_init()
486 cnd_destroy(&queue->has_queued_cond); in util_queue_init()
487 mtx_destroy(&queue->lock); in util_queue_init()
488 free(queue->jobs); in util_queue_init()
491 memset(queue, 0, sizeof(*queue)); in util_queue_init()
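
On any allocation or thread-creation failure, util_queue_init unwinds what
it already built and memsets the queue back to zero, which is what later
lets util_queue_destroy run safely on it. A minimal initialization sketch,
assuming the bool-returning prototype and flags declared in u_queue.h:

    #include "util/u_queue.h"

    struct util_queue queue;

    /* Ring of 32 job slots, up to 4 workers; with SCALE_THREADS the queue
     * starts a single thread and adds more only as the ring fills up. */
    if (!util_queue_init(&queue, "shader", 32, 4,
                         UTIL_QUEUE_INIT_SCALE_THREADS |
                         UTIL_QUEUE_INIT_RESIZE_IF_FULL, NULL)) {
       /* out of memory or thread creation failed */
    }
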
496 util_queue_kill_threads(struct util_queue *queue, unsigned keep_num_threads, in util_queue_kill_threads() argument
503 simple_mtx_lock(&queue->finish_lock); in util_queue_kill_threads()
505 if (keep_num_threads >= queue->num_threads) { in util_queue_kill_threads()
506 simple_mtx_unlock(&queue->finish_lock); in util_queue_kill_threads()
510 mtx_lock(&queue->lock); in util_queue_kill_threads()
511 unsigned old_num_threads = queue->num_threads; in util_queue_kill_threads()
515 queue->num_threads = keep_num_threads; in util_queue_kill_threads()
516 cnd_broadcast(&queue->has_queued_cond); in util_queue_kill_threads()
517 mtx_unlock(&queue->lock); in util_queue_kill_threads()
520 thrd_join(queue->threads[i], NULL); in util_queue_kill_threads()
523 simple_mtx_unlock(&queue->finish_lock); in util_queue_kill_threads()
534 util_queue_destroy(struct util_queue *queue) in util_queue_destroy() argument
536 util_queue_kill_threads(queue, 0, false); in util_queue_destroy()
538 /* This makes it safe to call on a queue that failed util_queue_init. */ in util_queue_destroy()
539 if (queue->head.next != NULL) in util_queue_destroy()
540 remove_from_atexit_list(queue); in util_queue_destroy()
542 cnd_destroy(&queue->has_space_cond); in util_queue_destroy()
543 cnd_destroy(&queue->has_queued_cond); in util_queue_destroy()
544 simple_mtx_destroy(&queue->finish_lock); in util_queue_destroy()
545 mtx_destroy(&queue->lock); in util_queue_destroy()
546 free(queue->jobs); in util_queue_destroy()
547 free(queue->threads); in util_queue_destroy()
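
Teardown is the mirror image: kill every worker, unhook the queue from the
atexit list (the head.next check is what makes this safe on a queue whose
util_queue_init failed and was zeroed), then destroy the synchronization
objects and free the arrays.

    /* Joins all workers, then frees the job ring and thread array. Safe
     * even after a failed util_queue_init(), since its error path zeroes
     * the queue. */
    util_queue_destroy(&queue);
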
551 util_queue_add_job(struct util_queue *queue, in util_queue_add_job() argument
560 mtx_lock(&queue->lock); in util_queue_add_job()
561 if (queue->num_threads == 0) { in util_queue_add_job()
562 mtx_unlock(&queue->lock); in util_queue_add_job()
572 assert(queue->num_queued >= 0 && queue->num_queued <= queue->max_jobs); in util_queue_add_job()
575 if (queue->num_queued == queue->max_jobs) { in util_queue_add_job()
576 if ((queue->flags & UTIL_QUEUE_INIT_SCALE_THREADS) && in util_queue_add_job()
578 queue->num_threads < queue->max_threads) { in util_queue_add_job()
579 util_queue_adjust_num_threads(queue, queue->num_threads + 1); in util_queue_add_job()
582 if (queue->flags & UTIL_QUEUE_INIT_RESIZE_IF_FULL && in util_queue_add_job()
583 queue->total_jobs_size + job_size < S_256MB) { in util_queue_add_job()
584 /* If the queue is full, make it larger to avoid waiting for a free slot. */ in util_queue_add_job()
587 unsigned new_max_jobs = queue->max_jobs + 8; in util_queue_add_job()
595 unsigned i = queue->read_idx; in util_queue_add_job()
598 jobs[num_jobs++] = queue->jobs[i]; in util_queue_add_job()
599 i = (i + 1) % queue->max_jobs; in util_queue_add_job()
600 } while (i != queue->write_idx); in util_queue_add_job()
602 assert(num_jobs == queue->num_queued); in util_queue_add_job()
604 free(queue->jobs); in util_queue_add_job()
605 queue->jobs = jobs; in util_queue_add_job()
606 queue->read_idx = 0; in util_queue_add_job()
607 queue->write_idx = num_jobs; in util_queue_add_job()
608 queue->max_jobs = new_max_jobs; in util_queue_add_job()
611 while (queue->num_queued == queue->max_jobs) in util_queue_add_job()
612 cnd_wait(&queue->has_space_cond, &queue->lock); in util_queue_add_job()
616 ptr = &queue->jobs[queue->write_idx]; in util_queue_add_job()
619 ptr->global_data = queue->global_data; in util_queue_add_job()
625 queue->write_idx = (queue->write_idx + 1) % queue->max_jobs; in util_queue_add_job()
626 queue->total_jobs_size += ptr->job_size; in util_queue_add_job()
628 queue->num_queued++; in util_queue_add_job()
629 cnd_signal(&queue->has_queued_cond); in util_queue_add_job()
630 mtx_unlock(&queue->lock); in util_queue_add_job()
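
Submission publishes the job at write_idx and signals has_queued_cond; the
optional fence is signaled by the worker once the job completes. A
submit-and-wait sketch, reusing the illustrative compile_job from above and
assuming the fence helpers declared alongside the queue API in u_queue.h:

    struct util_queue_fence fence;
    struct compile_job cj = { .source = src };  /* src is hypothetical */

    util_queue_fence_init(&fence);
    util_queue_add_job(&queue, &cj, &fence,
                       compile_execute,  /* runs on a worker thread */
                       NULL,             /* no cleanup callback */
                       0);               /* job_size: only used for RESIZE_IF_FULL accounting */
    util_queue_fence_wait(&fence);       /* blocks until a worker signals the fence */
    util_queue_fence_destroy(&fence);
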
635 * the queue. If the job has started execution, the function waits for it to complete.
644 util_queue_drop_job(struct util_queue *queue, struct util_queue_fence *fence) in util_queue_drop_job() argument
651 mtx_lock(&queue->lock); in util_queue_drop_job()
652 for (unsigned i = queue->read_idx; i != queue->write_idx; in util_queue_drop_job()
653 i = (i + 1) % queue->max_jobs) { in util_queue_drop_job()
654 if (queue->jobs[i].fence == fence) { in util_queue_drop_job()
655 if (queue->jobs[i].cleanup) in util_queue_drop_job()
656 queue->jobs[i].cleanup(queue->jobs[i].job, queue->global_data, -1); in util_queue_drop_job()
659 memset(&queue->jobs[i], 0, sizeof(queue->jobs[i])); in util_queue_drop_job()
664 mtx_unlock(&queue->lock); in util_queue_drop_job()
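
Note the -1 passed as the thread index above: a cleanup callback can use it
to distinguish a dropped job from one that actually ran. An illustrative
callback (free_compile_job is hypothetical):

    static void
    compile_cleanup(void *job, void *gdata, int thread_index)
    {
       /* thread_index == -1 means util_queue_drop_job() removed the job
        * before any worker picked it up. */
       if (thread_index == -1)
          free_compile_job(job);
    }
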
676 util_queue_finish(struct util_queue *queue) in util_queue_finish() argument
685 simple_mtx_lock(&queue->finish_lock); in util_queue_finish()
688 if (!queue->num_threads) { in util_queue_finish()
689 simple_mtx_unlock(&queue->finish_lock); in util_queue_finish()
693 fences = malloc(queue->num_threads * sizeof(*fences)); in util_queue_finish()
694 util_barrier_init(&barrier, queue->num_threads); in util_queue_finish()
696 for (unsigned i = 0; i < queue->num_threads; ++i) { in util_queue_finish()
698 util_queue_add_job(queue, &barrier, &fences[i], in util_queue_finish()
702 for (unsigned i = 0; i < queue->num_threads; ++i) { in util_queue_finish()
706 simple_mtx_unlock(&queue->finish_lock); in util_queue_finish()
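
util_queue_finish adds one barrier job per worker and waits on all of their
fences, so it returns only once every thread has drained its share of
previously queued work; holding finish_lock keeps thread-count changes out
while the barrier jobs are in flight. Usage is a single call:

    /* Block until every job submitted so far has completed. */
    util_queue_finish(&queue);
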
714 util_queue_get_thread_time_nano(struct util_queue *queue, unsigned thread_index) in util_queue_get_thread_time_nano() argument
717 if (thread_index >= queue->num_threads) in util_queue_get_thread_time_nano()
720 return util_thread_get_time_nano(queue->threads[thread_index]); in util_queue_get_thread_time_nano()