
Lines matching references to rl (struct request_list, Linux block layer, block/blk-core.c)

444 struct request_list *rl; in __blk_drain_queue() local
446 blk_queue_for_each_rl(rl, q) in __blk_drain_queue()
447 for (i = 0; i < ARRAY_SIZE(rl->wait); i++) in __blk_drain_queue()
448 wake_up_all(&rl->wait[i]); in __blk_drain_queue()
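These are the only rl lines in __blk_drain_queue(): when a queue is drained for teardown, every sleeper on every request_list must be kicked so it can observe the dying queue and give up. A minimal sketch of that loop; the enclosing function body and queue_lock handling are not in the listing and are assumed, and the wrapper name is hypothetical:

static void drain_wake_all(struct request_queue *q)	/* name is hypothetical */
{
	struct request_list *rl;
	int i;

	/*
	 * Walk every request_list hanging off @q (the root_rl plus one
	 * per blkcg) and wake all tasks sleeping in get_request().
	 */
	blk_queue_for_each_rl(rl, q)
		for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
			wake_up_all(&rl->wait[i]);
}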
555 int blk_init_rl(struct request_list *rl, struct request_queue *q, in blk_init_rl() argument
558 if (unlikely(rl->rq_pool)) in blk_init_rl()
561 rl->q = q; in blk_init_rl()
562 rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0; in blk_init_rl()
563 rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0; in blk_init_rl()
564 init_waitqueue_head(&rl->wait[BLK_RW_SYNC]); in blk_init_rl()
565 init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]); in blk_init_rl()
567 rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab, in blk_init_rl()
570 if (!rl->rq_pool) in blk_init_rl()
576 void blk_exit_rl(struct request_list *rl) in blk_exit_rl() argument
578 if (rl->rq_pool) in blk_exit_rl()
579 mempool_destroy(rl->rq_pool); in blk_exit_rl()
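Lines 555-579 are the request_list lifecycle: blk_init_rl() zeroes the sync/async counters, initializes both wait queues, and backs the list with a mempool of requests; blk_exit_rl() destroys that pool. A sketch stitching the fragments together; the mempool_create_node() arguments the listing truncates (mempool_free_slab, request_cachep, q->node) are filled in as assumptions:

int blk_init_rl(struct request_list *rl, struct request_queue *q,
		gfp_t gfp_mask)
{
	if (unlikely(rl->rq_pool))
		return 0;			/* already initialized */

	rl->q = q;
	/* sync and async requests are accounted separately */
	rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
	rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
	init_waitqueue_head(&rl->wait[BLK_RW_SYNC]);
	init_waitqueue_head(&rl->wait[BLK_RW_ASYNC]);

	/* trailing arguments below are assumptions, not in the listing */
	rl->rq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
					  mempool_free_slab, request_cachep,
					  gfp_mask, q->node);
	if (!rl->rq_pool)
		return -ENOMEM;

	return 0;
}

void blk_exit_rl(struct request_list *rl)
{
	if (rl->rq_pool)
		mempool_destroy(rl->rq_pool);
}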
760 static inline void blk_free_request(struct request_list *rl, struct request *rq) in blk_free_request() argument
763 elv_put_request(rl->q, rq); in blk_free_request()
768 mempool_free(rq, rl->rq_pool); in blk_free_request()
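Lines 760-768 return a request to its list's mempool, dropping any elevator reference first. A sketch; the REQ_ELVPRIV guard around elv_put_request() is an assumption, since the listing only shows the lines that mention rl:

static inline void blk_free_request(struct request_list *rl,
				    struct request *rq)
{
	if (rq->cmd_flags & REQ_ELVPRIV)	/* guard is assumed */
		elv_put_request(rl->q, rq);

	mempool_free(rq, rl->rq_pool);
}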
805 static void __freed_request(struct request_list *rl, int sync) in __freed_request() argument
807 struct request_queue *q = rl->q; in __freed_request()
813 if (rl == &q->root_rl && in __freed_request()
814 rl->count[sync] < queue_congestion_off_threshold(q)) in __freed_request()
817 if (rl->count[sync] + 1 <= q->nr_requests) { in __freed_request()
818 if (waitqueue_active(&rl->wait[sync])) in __freed_request()
819 wake_up(&rl->wait[sync]); in __freed_request()
821 blk_clear_rl_full(rl, sync); in __freed_request()
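Lines 805-821: once a slot frees up in one direction, __freed_request() clears bdi congestion (root list only), wakes one waiter, and clears the list's "full" flag. A sketch; the congestion-clearing call between lines 814 and 817, elided from the listing, is assumed to be blk_clear_queue_congested():

static void __freed_request(struct request_list *rl, int sync)
{
	struct request_queue *q = rl->q;

	/* only the root request_list drives bdi congestion state */
	if (rl == &q->root_rl &&
	    rl->count[sync] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, sync);	/* assumed call */

	if (rl->count[sync] + 1 <= q->nr_requests) {
		if (waitqueue_active(&rl->wait[sync]))
			wake_up(&rl->wait[sync]);

		blk_clear_rl_full(rl, sync);
	}
}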
829 static void freed_request(struct request_list *rl, unsigned int flags) in freed_request() argument
831 struct request_queue *q = rl->q; in freed_request()
835 rl->count[sync]--; in freed_request()
839 __freed_request(rl, sync); in freed_request()
841 if (unlikely(rl->starved[sync ^ 1])) in freed_request()
842 __freed_request(rl, sync ^ 1); in freed_request()
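Lines 829-842 are the outer free path: drop the per-direction count, run the wake-up logic for this direction, and also for the opposite one if it was starved (line 841's sync ^ 1 flips sync<->async). A sketch; the rw_is_sync() derivation and the queue-wide counter are assumptions:

static void freed_request(struct request_list *rl, unsigned int flags)
{
	struct request_queue *q = rl->q;
	int sync = rw_is_sync(flags);	/* assumed helper */

	q->nr_rqs[sync]--;		/* assumed: queue-wide accounting */
	rl->count[sync]--;

	__freed_request(rl, sync);

	/* a freed slot can also unstick the opposite direction */
	if (unlikely(rl->starved[sync ^ 1]))
		__freed_request(rl, sync ^ 1);
}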
894 static struct request *__get_request(struct request_list *rl, int rw_flags, in __get_request() argument
897 struct request_queue *q = rl->q; in __get_request()
912 if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) { in __get_request()
913 if (rl->count[is_sync]+1 >= q->nr_requests) { in __get_request()
920 if (!blk_rl_full(rl, is_sync)) { in __get_request()
922 blk_set_rl_full(rl, is_sync); in __get_request()
939 if (rl == &q->root_rl) in __get_request()
948 if (rl->count[is_sync] >= (3 * q->nr_requests / 2)) in __get_request()
952 rl->count[is_sync]++; in __get_request()
953 rl->starved[is_sync] = 0; in __get_request()
977 rq = mempool_alloc(rl->rq_pool, gfp_mask); in __get_request()
982 blk_rq_set_rl(rq, rl); in __get_request()
1042 freed_request(rl, rw_flags); in __get_request()
1052 if (unlikely(rl->count[is_sync] == 0)) in __get_request()
1053 rl->starved[is_sync] = 1; in __get_request()
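Lines 894-1053 are the heart of the allocator. The sketch below condenses it to the rl bookkeeping visible in the listing: full/congested marking on the way up, a hard cap at 1.5x nr_requests, optimistic count/starved updates before the mempool allocation, and the unwind path that re-runs freed_request() and marks the direction starved when the allocation fails. Everything between the listed lines (elevator setup, ioc batching, tracing) is omitted, and the rw_is_sync() derivation plus the failure label are assumptions:

/* Condensed sketch of __get_request(); only the rl bookkeeping from
 * the listing is kept, everything in between is elided or assumed. */
static struct request *__get_request(struct request_list *rl, int rw_flags,
				     struct bio *bio, gfp_t gfp_mask)
{
	struct request_queue *q = rl->q;
	const bool is_sync = rw_is_sync(rw_flags) != 0;	/* assumed */
	struct request *rq;

	if (rl->count[is_sync]+1 >= queue_congestion_on_threshold(q)) {
		if (rl->count[is_sync]+1 >= q->nr_requests) {
			/* queue is filling up: mark the list full so
			 * later non-batching allocators fail fast */
			if (!blk_rl_full(rl, is_sync))
				blk_set_rl_full(rl, is_sync);
		}
		/* only root_rl drives bdi congestion (assumed call) */
		if (rl == &q->root_rl)
			blk_set_queue_congested(q, is_sync);
	}

	/* hard cap: never stray past 1.5 * nr_requests */
	if (rl->count[is_sync] >= (3 * q->nr_requests / 2))
		return NULL;

	rl->count[is_sync]++;
	rl->starved[is_sync] = 0;

	rq = mempool_alloc(rl->rq_pool, gfp_mask);
	if (!rq)
		goto fail_alloc;	/* label name is assumed */

	blk_rq_set_rl(rq, rl);	/* rq now owns the rl reference */
	return rq;

fail_alloc:
	freed_request(rl, rw_flags);	/* undo the count taken above */
	/* if this was the last in-flight request, mark the direction
	 * starved so a free in the other direction wakes it up */
	if (unlikely(rl->count[is_sync] == 0))
		rl->starved[is_sync] = 1;
	return NULL;
}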
1076 struct request_list *rl; in get_request() local
1079 rl = blk_get_rl(q, bio); /* transferred to @rq on success */ in get_request()
1081 rq = __get_request(rl, rw_flags, bio, gfp_mask); in get_request()
1086 blk_put_rl(rl); in get_request()
1091 prepare_to_wait_exclusive(&rl->wait[is_sync], &wait, in get_request()
1107 finish_wait(&rl->wait[is_sync], &wait); in get_request()
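Lines 1076-1107 wrap __get_request() with the blkcg lookup and the sleep/retry loop: blk_get_rl() takes a reference that is handed to the request on success, dropped via blk_put_rl() on failure, and otherwise the task parks exclusively on rl->wait[is_sync] until freed_request() wakes it. A sketch; the retry label, the dying-queue test, and the io_schedule() call between the listed lines are assumptions:

static struct request *get_request(struct request_queue *q, int rw_flags,
				   struct bio *bio, gfp_t gfp_mask)
{
	const bool is_sync = rw_is_sync(rw_flags) != 0;	/* assumed */
	DEFINE_WAIT(wait);
	struct request_list *rl;
	struct request *rq;

	rl = blk_get_rl(q, bio);	/* transferred to @rq on success */
retry:
	rq = __get_request(rl, rw_flags, bio, gfp_mask);
	if (rq)
		return rq;

	/* non-blocking or dying queue: give the reference back and fail */
	if (!(gfp_mask & __GFP_WAIT) || unlikely(blk_queue_dying(q))) {
		blk_put_rl(rl);
		return NULL;
	}

	/* sleep until freed_request() wakes this list, then retry */
	prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
				  TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(q->queue_lock);
	io_schedule();
	spin_lock_irq(q->queue_lock);
	finish_wait(&rl->wait[is_sync], &wait);
	goto retry;
}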
1291 struct request_list *rl = blk_rq_rl(req); in __blk_put_request() local
1296 blk_free_request(rl, req); in __blk_put_request()
1297 freed_request(rl, flags); in __blk_put_request()
1298 blk_put_rl(rl); in __blk_put_request()
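Lines 1291-1298 are the mirror image on the completion side: the request goes back into the mempool, the counters drop (waking any sleepers), and the request_list reference taken in get_request() is finally released. A sketch of that block inside __blk_put_request(); the REQ_ALLOCED guard around it is an assumption:

	if (req->cmd_flags & REQ_ALLOCED) {	/* guard is assumed */
		unsigned int flags = req->cmd_flags;
		struct request_list *rl = blk_rq_rl(req);

		blk_free_request(rl, req);	/* back into rl->rq_pool */
		freed_request(rl, flags);	/* drop count, wake waiters */
		blk_put_rl(rl);			/* release the rl reference */
	}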