Lines matching refs: khd (block/kyber-iosched.c, the Kyber I/O scheduler)
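
Every hit below touches a field of struct kyber_hctx_data, Kyber's per-hardware-queue scheduler state. For orientation, here is that struct reconstructed from the fields the listing references; the field order and the comments are assumptions based on mainline block/kyber-iosched.c, not part of the listing.

struct kyber_hctx_data {
        spinlock_t lock;                                /* serializes rqs[] and dispatch state */
        struct list_head rqs[KYBER_NUM_DOMAINS];        /* flushed, dispatch-ready requests */
        unsigned int cur_domain;                        /* domain currently being batched */
        unsigned int batching;                          /* dispatches so far in this batch */
        struct kyber_ctx_queue *kcqs;                   /* one per software queue (hctx->nr_ctx) */
        struct sbitmap kcq_map[KYBER_NUM_DOMAINS];      /* which kcqs hold requests, per domain */
        struct sbq_wait domain_wait[KYBER_NUM_DOMAINS]; /* wait entries for token exhaustion */
        struct sbq_wait_state *domain_ws[KYBER_NUM_DOMAINS];
        atomic_t wait_index[KYBER_NUM_DOMAINS];
};
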
465 struct kyber_hctx_data *khd; in kyber_init_hctx() local
468 khd = kmalloc_node(sizeof(*khd), GFP_KERNEL, hctx->numa_node); in kyber_init_hctx()
469 if (!khd) in kyber_init_hctx()
472 khd->kcqs = kmalloc_array_node(hctx->nr_ctx, in kyber_init_hctx()
475 if (!khd->kcqs) in kyber_init_hctx()
479 kyber_ctx_queue_init(&khd->kcqs[i]); in kyber_init_hctx()
482 if (sbitmap_init_node(&khd->kcq_map[i], hctx->nr_ctx, in kyber_init_hctx()
486 sbitmap_free(&khd->kcq_map[i]); in kyber_init_hctx()
491 spin_lock_init(&khd->lock); in kyber_init_hctx()
494 INIT_LIST_HEAD(&khd->rqs[i]); in kyber_init_hctx()
495 khd->domain_wait[i].sbq = NULL; in kyber_init_hctx()
496 init_waitqueue_func_entry(&khd->domain_wait[i].wait, in kyber_init_hctx()
498 khd->domain_wait[i].wait.private = hctx; in kyber_init_hctx()
499 INIT_LIST_HEAD(&khd->domain_wait[i].wait.entry); in kyber_init_hctx()
500 atomic_set(&khd->wait_index[i], 0); in kyber_init_hctx()
503 khd->cur_domain = 0; in kyber_init_hctx()
504 khd->batching = 0; in kyber_init_hctx()
506 hctx->sched_data = khd; in kyber_init_hctx()
512 kfree(khd->kcqs); in kyber_init_hctx()
514 kfree(khd); in kyber_init_hctx()
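
Lines 465-514 are the setup path, kyber_init_hctx(): NUMA-local allocation followed by stepwise error unwinding. The connective code not shown in the listing (the error labels, the sbitmap shift, the kyber_domain_wake callback) is assumed from mainline block/kyber-iosched.c and varies slightly across kernel versions. A sketch:

static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
        struct kyber_hctx_data *khd;
        int i;

        khd = kmalloc_node(sizeof(*khd), GFP_KERNEL, hctx->numa_node);
        if (!khd)
                return -ENOMEM;

        /* One kyber_ctx_queue per software queue mapped to this hctx. */
        khd->kcqs = kmalloc_array_node(hctx->nr_ctx,
                                       sizeof(struct kyber_ctx_queue),
                                       GFP_KERNEL, hctx->numa_node);
        if (!khd->kcqs)
                goto err_khd;

        for (i = 0; i < hctx->nr_ctx; i++)
                kyber_ctx_queue_init(&khd->kcqs[i]);

        /* Per-domain bitmap of busy kcqs; unwind prior domains on failure.
         * (Newer kernels pass two extra bool arguments here.) */
        for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
                if (sbitmap_init_node(&khd->kcq_map[i], hctx->nr_ctx,
                                      ilog2(8), GFP_KERNEL, hctx->numa_node)) {
                        while (--i >= 0)
                                sbitmap_free(&khd->kcq_map[i]);
                        goto err_kcqs;
                }
        }

        spin_lock_init(&khd->lock);

        for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
                INIT_LIST_HEAD(&khd->rqs[i]);
                khd->domain_wait[i].sbq = NULL;
                init_waitqueue_func_entry(&khd->domain_wait[i].wait,
                                          kyber_domain_wake);   /* assumed */
                khd->domain_wait[i].wait.private = hctx;
                INIT_LIST_HEAD(&khd->domain_wait[i].wait.entry);
                atomic_set(&khd->wait_index[i], 0);
        }

        khd->cur_domain = 0;
        khd->batching = 0;

        hctx->sched_data = khd;
        return 0;

err_kcqs:
        kfree(khd->kcqs);
err_khd:
        kfree(khd);
        return -ENOMEM;
}
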
520 struct kyber_hctx_data *khd = hctx->sched_data; in kyber_exit_hctx() local
524 sbitmap_free(&khd->kcq_map[i]); in kyber_exit_hctx()
525 kfree(khd->kcqs); in kyber_exit_hctx()
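
The teardown path (lines 520-525) releases everything in reverse; the final kfree() is assumed from mainline:

static void kyber_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
{
        struct kyber_hctx_data *khd = hctx->sched_data;
        int i;

        for (i = 0; i < KYBER_NUM_DOMAINS; i++)
                sbitmap_free(&khd->kcq_map[i]);
        kfree(khd->kcqs);
        kfree(hctx->sched_data);        /* not in the listing; assumed */
}
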
571 struct kyber_hctx_data *khd = hctx->sched_data; in kyber_bio_merge() local
572 struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]]; in kyber_bio_merge()
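
Lines 571-572 are the front-end merge hook: the bio's software queue index (ctx->index_hw[hctx->type]) selects a kyber_ctx_queue, and merging is serialized on that kcq's own lock rather than khd->lock, which keeps submitting CPUs from contending on one spinlock. A sketch; kyber_sched_domain(), the kcq rq_list field, and blk_bio_list_merge() are assumed from mainline:

static bool kyber_bio_merge(struct request_queue *q, struct bio *bio,
                            unsigned int nr_segs)
{
        struct blk_mq_ctx *ctx = blk_mq_get_ctx(q);
        struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx);
        struct kyber_hctx_data *khd = hctx->sched_data;
        struct kyber_ctx_queue *kcq = &khd->kcqs[ctx->index_hw[hctx->type]];
        unsigned int sched_domain = kyber_sched_domain(bio->bi_opf);    /* assumed helper */
        struct list_head *rq_list = &kcq->rq_list[sched_domain];
        bool merged;

        spin_lock(&kcq->lock);
        merged = blk_bio_list_merge(hctx->queue, rq_list, bio, nr_segs);
        spin_unlock(&kcq->lock);

        return merged;
}
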
592 struct kyber_hctx_data *khd = hctx->sched_data; in kyber_insert_requests() local
597 struct kyber_ctx_queue *kcq = &khd->kcqs[rq->mq_ctx->index_hw[hctx->type]]; in kyber_insert_requests()
606 sbitmap_set_bit(&khd->kcq_map[sched_domain], in kyber_insert_requests()
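
Lines 592-606: insertion parks each request on its kcq's per-domain list and sets the matching bit in kcq_map, so dispatch can later find busy kcqs without scanning all of them. A sketch of the loop; the at_head handling and kyber_sched_domain() are assumed from mainline (the hook's signature differs across kernel versions):

static void kyber_insert_requests(struct blk_mq_hw_ctx *hctx,
                                  struct list_head *rq_list, bool at_head)
{
        struct kyber_hctx_data *khd = hctx->sched_data;
        struct request *rq, *next;

        list_for_each_entry_safe(rq, next, rq_list, queuelist) {
                unsigned int sched_domain = kyber_sched_domain(rq->cmd_flags);
                struct kyber_ctx_queue *kcq =
                        &khd->kcqs[rq->mq_ctx->index_hw[hctx->type]];
                struct list_head *head = &kcq->rq_list[sched_domain];

                spin_lock(&kcq->lock);
                if (at_head)
                        list_move(&rq->queuelist, head);
                else
                        list_move_tail(&rq->queuelist, head);
                /* Mark this kcq as holding work for this domain. */
                sbitmap_set_bit(&khd->kcq_map[sched_domain],
                                rq->mq_ctx->index_hw[hctx->type]);
                spin_unlock(&kcq->lock);
        }
}
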
660 struct kyber_hctx_data *khd; member
668 struct kyber_ctx_queue *kcq = &flush_data->khd->kcqs[bitnr]; in flush_busy_kcq()
679 static void kyber_flush_busy_kcqs(struct kyber_hctx_data *khd, in kyber_flush_busy_kcqs() argument
684 .khd = khd, in kyber_flush_busy_kcqs()
689 sbitmap_for_each_set(&khd->kcq_map[sched_domain], in kyber_flush_busy_kcqs()
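
Lines 660-689 are the drain path. A small cursor struct carries khd, the domain, and a destination list into the sbitmap_for_each_set() callback, which splices each busy kcq's requests out and clears its bit. The struct name flush_kcq_data is assumed from mainline; the rest follows the listed lines:

struct flush_kcq_data {
        struct kyber_hctx_data *khd;
        unsigned int sched_domain;
        struct list_head *list;
};

static bool flush_busy_kcq(struct sbitmap *sb, unsigned int bitnr, void *data)
{
        struct flush_kcq_data *flush_data = data;
        struct kyber_ctx_queue *kcq = &flush_data->khd->kcqs[bitnr];

        spin_lock(&kcq->lock);
        list_splice_tail_init(&kcq->rq_list[flush_data->sched_domain],
                              flush_data->list);
        sbitmap_clear_bit(sb, bitnr);
        spin_unlock(&kcq->lock);

        return true;    /* keep iterating over set bits */
}

static void kyber_flush_busy_kcqs(struct kyber_hctx_data *khd,
                                  unsigned int sched_domain,
                                  struct list_head *list)
{
        struct flush_kcq_data data = {
                .khd            = khd,
                .sched_domain   = sched_domain,
                .list           = list,
        };

        sbitmap_for_each_set(&khd->kcq_map[sched_domain], flush_busy_kcq, &data);
}
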
705 struct kyber_hctx_data *khd, in kyber_get_domain_token() argument
708 unsigned int sched_domain = khd->cur_domain; in kyber_get_domain_token()
710 struct sbq_wait *wait = &khd->domain_wait[sched_domain]; in kyber_get_domain_token()
723 &khd->wait_index[sched_domain]); in kyber_get_domain_token()
724 khd->domain_ws[sched_domain] = ws; in kyber_get_domain_token()
742 ws = khd->domain_ws[sched_domain]; in kyber_get_domain_token()
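
Lines 705-742 implement token acquisition with a lost-wakeup guard: if no token is free, the hctx's wait entry is queued on an sbq wait queue (cached in domain_ws so it can be found again), the get is retried in case a token was freed in the window, and a late success removes the entry so a stale wakeup cannot fire before this dispatch. A sketch; kqd->domain_tokens and the sbitmap_queue helpers are assumed from mainline:

static int kyber_get_domain_token(struct kyber_queue_data *kqd,
                                  struct kyber_hctx_data *khd,
                                  struct blk_mq_hw_ctx *hctx)
{
        unsigned int sched_domain = khd->cur_domain;
        struct sbitmap_queue *domain_tokens = &kqd->domain_tokens[sched_domain];
        struct sbq_wait *wait = &khd->domain_wait[sched_domain];
        struct sbq_wait_state *ws;
        int nr;

        nr = __sbitmap_queue_get(domain_tokens);

        /* No token: arrange a wakeup, then retry once to close the race
         * with a token freed before we got on the wait queue. */
        if (nr < 0 && list_empty_careful(&wait->wait.entry)) {
                ws = sbq_wait_ptr(domain_tokens,
                                  &khd->wait_index[sched_domain]);
                khd->domain_ws[sched_domain] = ws;
                sbitmap_add_wait_queue(domain_tokens, ws, wait);
                nr = __sbitmap_queue_get(domain_tokens);
        }

        /* Got a token after queueing: dequeue ourselves so the wakeup
         * can't run ahead of this dispatch. */
        if (nr >= 0 && !list_empty_careful(&wait->wait.entry)) {
                ws = khd->domain_ws[sched_domain];
                spin_lock_irq(&ws->wait.lock);
                list_del_init(&wait->wait.entry);
                spin_unlock_irq(&ws->wait.lock);
        }

        return nr;
}
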
753 struct kyber_hctx_data *khd, in kyber_dispatch_cur_domain() argument
760 rqs = &khd->rqs[khd->cur_domain]; in kyber_dispatch_cur_domain()
772 nr = kyber_get_domain_token(kqd, khd, hctx); in kyber_dispatch_cur_domain()
774 khd->batching++; in kyber_dispatch_cur_domain()
780 kyber_domain_names[khd->cur_domain]); in kyber_dispatch_cur_domain()
782 } else if (sbitmap_any_bit_set(&khd->kcq_map[khd->cur_domain])) { in kyber_dispatch_cur_domain()
783 nr = kyber_get_domain_token(kqd, khd, hctx); in kyber_dispatch_cur_domain()
785 kyber_flush_busy_kcqs(khd, khd->cur_domain, rqs); in kyber_dispatch_cur_domain()
787 khd->batching++; in kyber_dispatch_cur_domain()
793 kyber_domain_names[khd->cur_domain]); in kyber_dispatch_cur_domain()
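
Lines 753-793: per-domain dispatch prefers a request already flushed onto khd->rqs; only if that list is empty does it consult kcq_map, and it flushes the kcqs only after a token is in hand, so token-starved requests stay in their kcqs where later bios can still merge into them. A sketch; rq_set_domain_token() and the trace call's first argument are assumed from mainline:

static struct request *
kyber_dispatch_cur_domain(struct kyber_queue_data *kqd,
                          struct kyber_hctx_data *khd,
                          struct blk_mq_hw_ctx *hctx)
{
        struct list_head *rqs = &khd->rqs[khd->cur_domain];
        struct request *rq;
        int nr;

        rq = list_first_entry_or_null(rqs, struct request, queuelist);
        if (rq) {
                /* Already flushed: all we need is a token. */
                nr = kyber_get_domain_token(kqd, khd, hctx);
                if (nr >= 0) {
                        khd->batching++;
                        rq_set_domain_token(rq, nr);    /* assumed helper */
                        list_del_init(&rq->queuelist);
                        return rq;
                }
                trace_kyber_throttled(kqd->dev,
                                      kyber_domain_names[khd->cur_domain]);
        } else if (sbitmap_any_bit_set(&khd->kcq_map[khd->cur_domain])) {
                /* Pending work in the kcqs: flush only once we hold a token. */
                nr = kyber_get_domain_token(kqd, khd, hctx);
                if (nr >= 0) {
                        kyber_flush_busy_kcqs(khd, khd->cur_domain, rqs);
                        rq = list_first_entry(rqs, struct request, queuelist);
                        khd->batching++;
                        rq_set_domain_token(rq, nr);
                        list_del_init(&rq->queuelist);
                        return rq;
                }
                trace_kyber_throttled(kqd->dev,
                                      kyber_domain_names[khd->cur_domain]);
        }

        /* Either nothing is pending or the domain is out of tokens. */
        return NULL;
}
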
804 struct kyber_hctx_data *khd = hctx->sched_data; in kyber_dispatch_request() local
808 spin_lock(&khd->lock); in kyber_dispatch_request()
814 if (khd->batching < kyber_batch_size[khd->cur_domain]) { in kyber_dispatch_request()
815 rq = kyber_dispatch_cur_domain(kqd, khd, hctx); in kyber_dispatch_request()
829 khd->batching = 0; in kyber_dispatch_request()
831 if (khd->cur_domain == KYBER_NUM_DOMAINS - 1) in kyber_dispatch_request()
832 khd->cur_domain = 0; in kyber_dispatch_request()
834 khd->cur_domain++; in kyber_dispatch_request()
836 rq = kyber_dispatch_cur_domain(kqd, khd, hctx); in kyber_dispatch_request()
843 spin_unlock(&khd->lock); in kyber_dispatch_request()
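
Lines 804-843 are the top-level dispatch under khd->lock: finish the current domain's batch up to kyber_batch_size[cur_domain], then reset batching and round-robin cur_domain through all KYBER_NUM_DOMAINS looking for something dispatchable. The wrap-around (lines 831-834) is what keeps one busy domain from starving the others. A sketch:

static struct request *kyber_dispatch_request(struct blk_mq_hw_ctx *hctx)
{
        struct kyber_queue_data *kqd = hctx->queue->elevator->elevator_data;
        struct kyber_hctx_data *khd = hctx->sched_data;
        struct request *rq;
        int i;

        spin_lock(&khd->lock);

        /* Still entitled to a batch from the current domain? */
        if (khd->batching < kyber_batch_size[khd->cur_domain]) {
                rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
                if (rq)
                        goto out;
        }

        /* Batch exhausted, domain empty, or out of tokens: start a new
         * batch, rotating through the domains (wrapping back around). */
        khd->batching = 0;
        for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
                if (khd->cur_domain == KYBER_NUM_DOMAINS - 1)
                        khd->cur_domain = 0;
                else
                        khd->cur_domain++;

                rq = kyber_dispatch_cur_domain(kqd, khd, hctx);
                if (rq)
                        goto out;
        }

        rq = NULL;
out:
        spin_unlock(&khd->lock);
        return rq;
}
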
849 struct kyber_hctx_data *khd = hctx->sched_data; in kyber_has_work() local
853 if (!list_empty_careful(&khd->rqs[i]) || in kyber_has_work()
854 sbitmap_any_bit_set(&khd->kcq_map[i])) in kyber_has_work()
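
Lines 849-854: work detection checks both places a request can sit, the flushed rqs lists and the kcq_map bitmaps, without taking khd->lock (hence list_empty_careful()). A sketch:

static bool kyber_has_work(struct blk_mq_hw_ctx *hctx)
{
        struct kyber_hctx_data *khd = hctx->sched_data;
        int i;

        for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
                if (!list_empty_careful(&khd->rqs[i]) ||
                    sbitmap_any_bit_set(&khd->kcq_map[i]))
                        return true;
        }

        return false;
}
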
909 __acquires(&khd->lock) \
912 struct kyber_hctx_data *khd = hctx->sched_data; \
914 spin_lock(&khd->lock); \
915 return seq_list_start(&khd->rqs[domain], *pos); \
922 struct kyber_hctx_data *khd = hctx->sched_data; \
924 return seq_list_next(v, &khd->rqs[domain], pos); \
928 __releases(&khd->lock) \
931 struct kyber_hctx_data *khd = hctx->sched_data; \
933 spin_unlock(&khd->lock); \
946 struct kyber_hctx_data *khd = hctx->sched_data; \
947 wait_queue_entry_t *wait = &khd->domain_wait[domain].wait; \
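
Lines 909-947 come from a per-domain debugfs macro: seq_file start/next/stop callbacks walk khd->rqs[domain] under khd->lock (the __acquires/__releases annotations are for sparse), and a companion show callback reports whether the domain's wait entry is queued. A sketch of the khd-touching part of the generated code; the macro name KYBER_DEBUGFS_DOMAIN_ATTRS and the callback signatures are assumed from mainline:

#define KYBER_DEBUGFS_DOMAIN_ATTRS(domain, name)                        \
static void *kyber_##name##_rqs_start(struct seq_file *m, loff_t *pos) \
        __acquires(&khd->lock)                                          \
{                                                                       \
        struct blk_mq_hw_ctx *hctx = m->private;                        \
        struct kyber_hctx_data *khd = hctx->sched_data;                 \
                                                                        \
        spin_lock(&khd->lock);                                          \
        return seq_list_start(&khd->rqs[domain], *pos);                 \
}                                                                       \
                                                                        \
static void *kyber_##name##_rqs_next(struct seq_file *m, void *v,      \
                                     loff_t *pos)                       \
{                                                                       \
        struct blk_mq_hw_ctx *hctx = m->private;                        \
        struct kyber_hctx_data *khd = hctx->sched_data;                 \
                                                                        \
        return seq_list_next(v, &khd->rqs[domain], pos);                \
}                                                                       \
                                                                        \
static void kyber_##name##_rqs_stop(struct seq_file *m, void *v)       \
        __releases(&khd->lock)                                          \
{                                                                       \
        struct blk_mq_hw_ctx *hctx = m->private;                        \
        struct kyber_hctx_data *khd = hctx->sched_data;                 \
                                                                        \
        spin_unlock(&khd->lock);                                        \
}                                                                       \
                                                                        \
static int kyber_##name##_waiting_show(void *data, struct seq_file *m) \
{                                                                       \
        struct blk_mq_hw_ctx *hctx = data;                              \
        struct kyber_hctx_data *khd = hctx->sched_data;                 \
        wait_queue_entry_t *wait = &khd->domain_wait[domain].wait;      \
                                                                        \
        seq_printf(m, "%d\n", !list_empty_careful(&wait->entry));       \
        return 0;                                                       \
}
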
970 struct kyber_hctx_data *khd = hctx->sched_data; in kyber_cur_domain_show() local
972 seq_printf(m, "%s\n", kyber_domain_names[khd->cur_domain]); in kyber_cur_domain_show()
979 struct kyber_hctx_data *khd = hctx->sched_data; in kyber_batching_show() local
981 seq_printf(m, "%u\n", khd->batching); in kyber_batching_show()
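
Lines 970-981 round out the debugfs view with two plain read-only attributes exposing the dispatch state. A sketch, with the blk-mq debugfs show signature assumed:

static int kyber_cur_domain_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct kyber_hctx_data *khd = hctx->sched_data;

        seq_printf(m, "%s\n", kyber_domain_names[khd->cur_domain]);
        return 0;
}

static int kyber_batching_show(void *data, struct seq_file *m)
{
        struct blk_mq_hw_ctx *hctx = data;
        struct kyber_hctx_data *khd = hctx->sched_data;

        seq_printf(m, "%u\n", khd->batching);
        return 0;
}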