Lines Matching full:queue

47 util_queue_kill_threads(struct util_queue *queue, unsigned keep_num_threads,
85 add_to_atexit_list(struct util_queue *queue) in add_to_atexit_list() argument
90 list_add(&queue->head, &queue_list); in add_to_atexit_list()
95 remove_from_atexit_list(struct util_queue *queue) in remove_from_atexit_list() argument
101 if (iter == queue) { in remove_from_atexit_list()
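
The matches above and below come from the thread-pool implementation in Mesa's u_queue.c. The atexit bookkeeping matched here lets every live queue register itself on a global list, so a single atexit() handler can stop all worker threads before process teardown. A minimal standalone sketch of that pattern in C11; the list layout and helper names are illustrative, not Mesa's exact internals (Mesa uses its list.h and a once-initialized global mutex):

```c
#include <stdlib.h>
#include <threads.h>

struct queue_node {
   struct queue_node *next;
   /* ...per-queue state (threads, ring buffer, ...)... */
};

static struct queue_node *queue_list;        /* global list of live queues */
static mtx_t exit_mutex;
static once_flag atexit_once = ONCE_FLAG_INIT;

static void kill_all_queue_threads(void)
{
   mtx_lock(&exit_mutex);
   for (struct queue_node *q = queue_list; q; q = q->next) {
      /* stop and join q's worker threads here */
   }
   mtx_unlock(&exit_mutex);
}

static void global_init(void)
{
   mtx_init(&exit_mutex, mtx_plain);
   atexit(kill_all_queue_threads);
}

static void add_to_atexit_list(struct queue_node *q)
{
   call_once(&atexit_once, global_init);

   mtx_lock(&exit_mutex);
   q->next = queue_list;
   queue_list = q;
   mtx_unlock(&exit_mutex);
}
```
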
249 struct util_queue *queue; member
256 struct util_queue *queue = ((struct thread_input*)input)->queue; in util_queue_thread_func() local
261 if (queue->flags & UTIL_QUEUE_INIT_SET_FULL_THREAD_AFFINITY) { in util_queue_thread_func()
274 if (queue->flags & UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY) { in util_queue_thread_func()
280 if (strlen(queue->name) > 0) { in util_queue_thread_func()
282 snprintf(name, sizeof(name), "%s%i", queue->name, thread_index); in util_queue_thread_func()
289 mtx_lock(&queue->lock); in util_queue_thread_func()
290 assert(queue->num_queued >= 0 && queue->num_queued <= queue->max_jobs); in util_queue_thread_func()
292 /* wait if the queue is empty */ in util_queue_thread_func()
293 while (thread_index < queue->num_threads && queue->num_queued == 0) in util_queue_thread_func()
294 cnd_wait(&queue->has_queued_cond, &queue->lock); in util_queue_thread_func()
297 if (thread_index >= queue->num_threads) { in util_queue_thread_func()
298 mtx_unlock(&queue->lock); in util_queue_thread_func()
302 job = queue->jobs[queue->read_idx]; in util_queue_thread_func()
303 memset(&queue->jobs[queue->read_idx], 0, sizeof(struct util_queue_job)); in util_queue_thread_func()
304 queue->read_idx = (queue->read_idx + 1) % queue->max_jobs; in util_queue_thread_func()
306 queue->num_queued--; in util_queue_thread_func()
307 cnd_signal(&queue->has_space_cond); in util_queue_thread_func()
309 queue->total_jobs_size -= job.job_size; in util_queue_thread_func()
310 mtx_unlock(&queue->lock); in util_queue_thread_func()
322 mtx_lock(&queue->lock); in util_queue_thread_func()
323 if (queue->num_threads == 0) { in util_queue_thread_func()
324 for (unsigned i = queue->read_idx; i != queue->write_idx; in util_queue_thread_func()
325 i = (i + 1) % queue->max_jobs) { in util_queue_thread_func()
326 if (queue->jobs[i].job) { in util_queue_thread_func()
327 if (queue->jobs[i].fence) in util_queue_thread_func()
328 util_queue_fence_signal(queue->jobs[i].fence); in util_queue_thread_func()
329 queue->jobs[i].job = NULL; in util_queue_thread_func()
332 queue->read_idx = queue->write_idx; in util_queue_thread_func()
333 queue->num_queued = 0; in util_queue_thread_func()
335 mtx_unlock(&queue->lock); in util_queue_thread_func()
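
util_queue_thread_func, matched above, is a textbook condition-variable consumer: sleep on has_queued_cond while the ring is empty, pop one job at read_idx, signal has_space_cond for any blocked producer, and only then execute the job with the lock dropped. Note the exit condition: instead of a shutdown flag, each worker compares its own thread_index against queue->num_threads, which is how util_queue_kill_threads trims the pool. A condensed, self-contained sketch of the same loop (simplified job type; field names follow the matches):

```c
#include <threads.h>

struct job { void (*execute)(void *data); void *data; };

struct ring {
   mtx_t lock;
   cnd_t has_queued_cond, has_space_cond;
   struct job *jobs;
   unsigned read_idx, write_idx, max_jobs;
   unsigned num_queued;
   unsigned num_threads;
};

struct worker_arg { struct ring *q; unsigned thread_index; };

static int worker(void *arg)
{
   struct ring *q = ((struct worker_arg *)arg)->q;
   unsigned thread_index = ((struct worker_arg *)arg)->thread_index;

   for (;;) {
      mtx_lock(&q->lock);

      /* Sleep while there is nothing to do and this thread is still wanted. */
      while (thread_index < q->num_threads && q->num_queued == 0)
         cnd_wait(&q->has_queued_cond, &q->lock);

      /* kill_threads lowered num_threads: surplus workers exit here. */
      if (thread_index >= q->num_threads) {
         mtx_unlock(&q->lock);
         break;
      }

      struct job job = q->jobs[q->read_idx];
      q->read_idx = (q->read_idx + 1) % q->max_jobs;
      q->num_queued--;
      cnd_signal(&q->has_space_cond);   /* a blocked producer may proceed */
      mtx_unlock(&q->lock);

      job.execute(job.data);            /* run the job with the lock dropped */
   }
   return 0;
}
```

Executing the job outside the lock is the important design choice here: job run time never blocks producers or the other workers.
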
340 util_queue_create_thread(struct util_queue *queue, unsigned index) in util_queue_create_thread() argument
344 input->queue = queue; in util_queue_create_thread()
347 if (thrd_success != u_thread_create(queue->threads + index, util_queue_thread_func, input)) { in util_queue_create_thread()
352 if (queue->flags & UTIL_QUEUE_INIT_USE_MINIMUM_PRIORITY) { in util_queue_create_thread()
363 pthread_setschedparam(queue->threads[index], SCHED_BATCH, &sched_param); in util_queue_create_thread()
370 util_queue_adjust_num_threads(struct util_queue *queue, unsigned num_threads, in util_queue_adjust_num_threads() argument
373 num_threads = MIN2(num_threads, queue->max_threads); in util_queue_adjust_num_threads()
377 mtx_lock(&queue->lock); in util_queue_adjust_num_threads()
379 unsigned old_num_threads = queue->num_threads; in util_queue_adjust_num_threads()
383 mtx_unlock(&queue->lock); in util_queue_adjust_num_threads()
388 util_queue_kill_threads(queue, num_threads, true); in util_queue_adjust_num_threads()
390 mtx_unlock(&queue->lock); in util_queue_adjust_num_threads()
399 queue->num_threads = num_threads; in util_queue_adjust_num_threads()
401 if (!util_queue_create_thread(queue, i)) { in util_queue_adjust_num_threads()
402 queue->num_threads = i; in util_queue_adjust_num_threads()
408 mtx_unlock(&queue->lock); in util_queue_adjust_num_threads()
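
util_queue_adjust_num_threads shrinks by delegating to util_queue_kill_threads and grows by spawning threads one at a time, rolling num_threads back to the last successfully created thread on failure. A hypothetical caller of this entry point (scale_pool and the busy flag are illustrative; locked is false because the caller does not already hold queue->lock):

```c
#include <stdbool.h>
#include "util/u_queue.h"

/* Hypothetical policy hook: scale the pool with load. */
static void scale_pool(struct util_queue *queue, bool busy)
{
   util_queue_adjust_num_threads(queue, busy ? queue->max_threads : 1, false);
}
```
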
412 util_queue_init(struct util_queue *queue, in util_queue_init() argument
431 const int max_chars = sizeof(queue->name) - 1; in util_queue_init()
440 memset(queue, 0, sizeof(*queue)); in util_queue_init()
443 snprintf(queue->name, sizeof(queue->name), "%.*s:%s", in util_queue_init()
446 snprintf(queue->name, sizeof(queue->name), "%s", name); in util_queue_init()
449 queue->create_threads_on_demand = true; in util_queue_init()
450 queue->flags = flags; in util_queue_init()
451 queue->max_threads = num_threads; in util_queue_init()
452 queue->num_threads = 1; in util_queue_init()
453 queue->max_jobs = max_jobs; in util_queue_init()
454 queue->global_data = global_data; in util_queue_init()
456 (void) mtx_init(&queue->lock, mtx_plain); in util_queue_init()
458 queue->num_queued = 0; in util_queue_init()
459 cnd_init(&queue->has_queued_cond); in util_queue_init()
460 cnd_init(&queue->has_space_cond); in util_queue_init()
462 queue->jobs = (struct util_queue_job*) in util_queue_init()
464 if (!queue->jobs) in util_queue_init()
467 queue->threads = (thrd_t*) calloc(queue->max_threads, sizeof(thrd_t)); in util_queue_init()
468 if (!queue->threads) in util_queue_init()
472 for (i = 0; i < queue->num_threads; i++) { in util_queue_init()
473 if (!util_queue_create_thread(queue, i)) { in util_queue_init()
479 queue->num_threads = i; in util_queue_init()
485 add_to_atexit_list(queue); in util_queue_init()
489 free(queue->threads); in util_queue_init()
491 if (queue->jobs) { in util_queue_init()
492 cnd_destroy(&queue->has_space_cond); in util_queue_init()
493 cnd_destroy(&queue->has_queued_cond); in util_queue_init()
494 mtx_destroy(&queue->lock); in util_queue_init()
495 free(queue->jobs); in util_queue_init()
498 memset(queue, 0, sizeof(*queue)); in util_queue_init()
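
For orientation, this is roughly what driver-side usage of the public API initialized above looks like, assuming Mesa's util/u_queue.h is on the include path; struct shader_job and compile_shader are illustrative stand-ins, not Mesa code:

```c
#include <stdbool.h>
#include "util/u_queue.h"

struct shader_job { int id; /* ...inputs for the async work... */ };

static void compile_shader(void *job, void *global_data, int thread_index)
{
   struct shader_job *s = (struct shader_job *)job;
   (void)s; (void)global_data; (void)thread_index;
   /* ...expensive work, running on one of the queue's threads... */
}

static bool start_async_compile(struct util_queue *queue,
                                struct shader_job *s,
                                struct util_queue_fence *fence)
{
   /* 8-entry ring, up to 4 threads; grow the ring instead of blocking
    * the producer when it fills up. */
   if (!util_queue_init(queue, "shcomp", 8, 4,
                        UTIL_QUEUE_INIT_RESIZE_IF_FULL, NULL))
      return false;

   util_queue_fence_init(fence);
   util_queue_add_job(queue, s, fence, compile_shader, NULL, 0);
   return true;
}
```

The caller later blocks on the result with util_queue_fence_wait(fence), or cancels it (see util_queue_drop_job below), and eventually pairs the init with util_queue_destroy.
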
503 util_queue_kill_threads(struct util_queue *queue, unsigned keep_num_threads, in util_queue_kill_threads() argument
508 mtx_lock(&queue->lock); in util_queue_kill_threads()
510 if (keep_num_threads >= queue->num_threads) { in util_queue_kill_threads()
512 mtx_unlock(&queue->lock); in util_queue_kill_threads()
516 unsigned old_num_threads = queue->num_threads; in util_queue_kill_threads()
520 queue->num_threads = keep_num_threads; in util_queue_kill_threads()
521 cnd_broadcast(&queue->has_queued_cond); in util_queue_kill_threads()
526 mtx_unlock(&queue->lock); in util_queue_kill_threads()
528 thrd_join(queue->threads[i], NULL); in util_queue_kill_threads()
530 mtx_lock(&queue->lock); in util_queue_kill_threads()
533 mtx_unlock(&queue->lock); in util_queue_kill_threads()
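
The trimming protocol matched above is worth spelling out: num_threads is lowered while holding the lock, has_queued_cond is broadcast so every sleeping worker re-checks its index, and the joins happen with the lock dropped so an exiting worker can still acquire it to finish its last job. Extending the earlier ring sketch (ring_kill_threads is an illustrative name):

```c
static void ring_kill_threads(struct ring *q, thrd_t *threads, unsigned keep)
{
   mtx_lock(&q->lock);
   if (keep >= q->num_threads) {     /* nothing to trim */
      mtx_unlock(&q->lock);
      return;
   }

   unsigned old_num_threads = q->num_threads;
   q->num_threads = keep;            /* workers re-check this on wakeup */
   cnd_broadcast(&q->has_queued_cond);
   mtx_unlock(&q->lock);

   /* Join with the lock dropped: exiting workers may still need it. */
   for (unsigned i = keep; i < old_num_threads; i++)
      thrd_join(threads[i], NULL);
}
```
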
546 util_queue_destroy(struct util_queue *queue) in util_queue_destroy() argument
548 util_queue_kill_threads(queue, 0, false); in util_queue_destroy()
550 /* This makes it safe to call on a queue that failed util_queue_init. */ in util_queue_destroy()
551 if (queue->head.next != NULL) in util_queue_destroy()
552 remove_from_atexit_list(queue); in util_queue_destroy()
554 cnd_destroy(&queue->has_space_cond); in util_queue_destroy()
555 cnd_destroy(&queue->has_queued_cond); in util_queue_destroy()
556 mtx_destroy(&queue->lock); in util_queue_destroy()
557 free(queue->jobs); in util_queue_destroy()
558 free(queue->threads); in util_queue_destroy()
562 util_queue_add_job_locked(struct util_queue *queue, in util_queue_add_job_locked() argument
573 mtx_lock(&queue->lock); in util_queue_add_job_locked()
574 if (queue->num_threads == 0) { in util_queue_add_job_locked()
576 mtx_unlock(&queue->lock); in util_queue_add_job_locked()
586 assert(queue->num_queued >= 0 && queue->num_queued <= queue->max_jobs); in util_queue_add_job_locked()
589 if (queue->num_queued > 0 && in util_queue_add_job_locked()
590 queue->create_threads_on_demand && in util_queue_add_job_locked()
592 queue->num_threads < queue->max_threads) { in util_queue_add_job_locked()
593 util_queue_adjust_num_threads(queue, queue->num_threads + 1, true); in util_queue_add_job_locked()
596 if (queue->num_queued == queue->max_jobs) { in util_queue_add_job_locked()
597 if (queue->flags & UTIL_QUEUE_INIT_RESIZE_IF_FULL && in util_queue_add_job_locked()
598 queue->total_jobs_size + job_size < S_256MB) { in util_queue_add_job_locked()
599 /* If the queue is full, make it larger to avoid waiting for a free in util_queue_add_job_locked()
602 unsigned new_max_jobs = queue->max_jobs + 8; in util_queue_add_job_locked()
610 unsigned i = queue->read_idx; in util_queue_add_job_locked()
613 jobs[num_jobs++] = queue->jobs[i]; in util_queue_add_job_locked()
614 i = (i + 1) % queue->max_jobs; in util_queue_add_job_locked()
615 } while (i != queue->write_idx); in util_queue_add_job_locked()
617 assert(num_jobs == queue->num_queued); in util_queue_add_job_locked()
619 free(queue->jobs); in util_queue_add_job_locked()
620 queue->jobs = jobs; in util_queue_add_job_locked()
621 queue->read_idx = 0; in util_queue_add_job_locked()
622 queue->write_idx = num_jobs; in util_queue_add_job_locked()
623 queue->max_jobs = new_max_jobs; in util_queue_add_job_locked()
626 while (queue->num_queued == queue->max_jobs) in util_queue_add_job_locked()
627 cnd_wait(&queue->has_space_cond, &queue->lock); in util_queue_add_job_locked()
631 ptr = &queue->jobs[queue->write_idx]; in util_queue_add_job_locked()
634 ptr->global_data = queue->global_data; in util_queue_add_job_locked()
640 queue->write_idx = (queue->write_idx + 1) % queue->max_jobs; in util_queue_add_job_locked()
641 queue->total_jobs_size += ptr->job_size; in util_queue_add_job_locked()
643 queue->num_queued++; in util_queue_add_job_locked()
644 cnd_signal(&queue->has_queued_cond); in util_queue_add_job_locked()
646 mtx_unlock(&queue->lock); in util_queue_add_job_locked()
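
The RESIZE_IF_FULL path matched above avoids blocking producers: when the ring is full (and the total queued payload stays under the 256 MB cap), it allocates a larger array and copies the live entries in FIFO order starting at read_idx, so the new ring begins at index 0. Continuing the earlier ring sketch (grow_ring is an illustrative helper):

```c
#include <assert.h>
#include <stdbool.h>
#include <stdlib.h>

static bool grow_ring(struct ring *q, unsigned new_max)
{
   struct job *jobs = (struct job *)calloc(new_max, sizeof(*jobs));
   if (!jobs)
      return false;

   /* Copy live entries in FIFO order.  When the ring is full,
    * read_idx == write_idx, hence the do/while rather than a while. */
   unsigned n = 0;
   if (q->num_queued) {
      unsigned i = q->read_idx;
      do {
         jobs[n++] = q->jobs[i];
         i = (i + 1) % q->max_jobs;
      } while (i != q->write_idx);
   }
   assert(n == q->num_queued);

   free(q->jobs);
   q->jobs = jobs;
   q->read_idx = 0;
   q->write_idx = n;
   q->max_jobs = new_max;
   return true;
}
```
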
650 util_queue_add_job(struct util_queue *queue, in util_queue_add_job() argument
657 util_queue_add_job_locked(queue, job, fence, execute, cleanup, job_size, in util_queue_add_job()
663 * the queue. If the job has started execution, the function waits for it to
672 util_queue_drop_job(struct util_queue *queue, struct util_queue_fence *fence) in util_queue_drop_job() argument
679 mtx_lock(&queue->lock); in util_queue_drop_job()
680 for (unsigned i = queue->read_idx; i != queue->write_idx; in util_queue_drop_job()
681 i = (i + 1) % queue->max_jobs) { in util_queue_drop_job()
682 if (queue->jobs[i].fence == fence) { in util_queue_drop_job()
683 if (queue->jobs[i].cleanup) in util_queue_drop_job()
684 queue->jobs[i].cleanup(queue->jobs[i].job, queue->global_data, -1); in util_queue_drop_job()
687 memset(&queue->jobs[i], 0, sizeof(queue->jobs[i])); in util_queue_drop_job()
692 mtx_unlock(&queue->lock); in util_queue_drop_job()
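
util_queue_drop_job gives callers cancel-or-wait semantics: if the job is still queued it is removed and its cleanup callback runs with thread_index == -1, and if it has already started executing the call simply waits for it to finish. Either way the fence ends up signaled, so a caller can do something like this (cancel_compile is an illustrative helper):

```c
#include "util/u_queue.h"

static void cancel_compile(struct util_queue *queue,
                           struct util_queue_fence *fence)
{
   /* Drops the job if still queued, waits for it otherwise;
    * the fence is signaled in both cases. */
   util_queue_drop_job(queue, fence);
   util_queue_fence_destroy(fence);
}
```
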
704 util_queue_finish(struct util_queue *queue) in util_queue_finish() argument
713 mtx_lock(&queue->lock); in util_queue_finish()
716 if (!queue->num_threads) { in util_queue_finish()
717 mtx_unlock(&queue->lock); in util_queue_finish()
725 * enough space in the queue and wait for space. in util_queue_finish()
727 queue->create_threads_on_demand = false; in util_queue_finish()
729 fences = malloc(queue->num_threads * sizeof(*fences)); in util_queue_finish()
730 util_barrier_init(&barrier, queue->num_threads); in util_queue_finish()
732 for (unsigned i = 0; i < queue->num_threads; ++i) { in util_queue_finish()
734 util_queue_add_job_locked(queue, &barrier, &fences[i], in util_queue_finish()
737 queue->create_threads_on_demand = true; in util_queue_finish()
738 mtx_unlock(&queue->lock); in util_queue_finish()
740 for (unsigned i = 0; i < queue->num_threads; ++i) { in util_queue_finish()
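
util_queue_finish drains the queue with a barrier trick: it temporarily disables on-demand thread creation (so the thread count cannot change underneath it), submits one barrier job per worker, and waits on all the fences. Since every worker must enter the barrier, no job queued earlier can still be running when it returns. The execute callback of each barrier job is essentially the following sketch (Mesa's actual helper has a different name; util_barrier comes from util/u_thread.h):

```c
#include "util/u_thread.h"

static void finish_execute(void *data, void *gdata, int thread_index)
{
   util_barrier *barrier = (util_barrier *)data;
   (void)gdata; (void)thread_index;
   util_barrier_wait(barrier);   /* returns once all workers arrive */
}
```

At teardown, callers typically pair util_queue_finish(&queue) with util_queue_destroy(&queue).
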
749 util_queue_get_thread_time_nano(struct util_queue *queue, unsigned thread_index) in util_queue_get_thread_time_nano() argument
752 if (thread_index >= queue->num_threads) in util_queue_get_thread_time_nano()
755 return util_thread_get_time_nano(queue->threads[thread_index]); in util_queue_get_thread_time_nano()
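
Finally, util_queue_get_thread_time_nano bails out for thread indices at or beyond num_threads and otherwise reports that thread's CPU time. An illustrative profiling helper (total_queue_time_ns is hypothetical):

```c
#include <stdint.h>
#include "util/u_queue.h"

static int64_t total_queue_time_ns(struct util_queue *queue)
{
   int64_t total_ns = 0;
   for (unsigned i = 0; i < queue->num_threads; i++)
      total_ns += util_queue_get_thread_time_nano(queue, i);
   return total_ns;
}
```
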