Searched refs:rl (Results 1 – 5 of 5) sorted by relevance

/block/
blk-cgroup.h
104 struct request_list rl; member
320 return &blkg->rl; in blk_get_rl()
333 static inline void blk_put_rl(struct request_list *rl) in blk_put_rl() argument
336 if (rl->blkg && rl->blkg->blkcg != &blkcg_root) in blk_put_rl()
337 blkg_put(rl->blkg); in blk_put_rl()
348 static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) in blk_rq_set_rl() argument
350 rq->rl = rl; in blk_rq_set_rl()
361 return rq->rl; in blk_rq_rl()
364 struct request_list *__blk_queue_next_rl(struct request_list *rl,
371 #define blk_queue_for_each_rl(rl, q) \ argument
[all …]
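
The blk-cgroup.h hits above outline the per-cgroup request_list API: each blkcg_gq embeds a request_list, blk_get_rl() picks a list and pins its group, and blk_put_rl() unpins it (the root list is never pinned). A minimal user-space model of that ownership pattern follows; every type and helper in it is a simplified stand-in, not the kernel code.

    #include <stdio.h>

    struct group;

    struct request_list {
        struct group *grp;      /* owning group; NULL for the root list */
        int count[2];           /* sync/async in-flight requests */
    };

    struct group {
        int refcnt;             /* models blkg_get()/blkg_put() */
        struct request_list rl; /* embedded list, as in struct blkcg_gq */
    };

    static struct request_list root_rl;    /* models q->root_rl */

    static struct request_list *get_rl(struct group *grp)   /* cf. blk_get_rl() */
    {
        if (!grp)
            return &root_rl;    /* root-cgroup requests use the root list */
        grp->refcnt++;          /* models blkg_get() */
        return &grp->rl;
    }

    static void put_rl(struct request_list *rl)             /* cf. blk_put_rl() */
    {
        if (rl->grp)
            rl->grp->refcnt--;  /* models blkg_put() */
    }

    int main(void)
    {
        struct group g = { .refcnt = 1 };
        g.rl.grp = &g;                        /* mirrors blkg->rl.blkg = blkg */

        struct request_list *rl = get_rl(&g); /* allocation path */
        rl->count[0]++;                       /* account one sync request */
        rl->count[0]--;                       /* request completes */
        put_rl(rl);                           /* release path */
        printf("refcnt back to %d\n", g.refcnt);
        return 0;
    }
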
blk-core.c
444 struct request_list *rl; in __blk_drain_queue() local
446 blk_queue_for_each_rl(rl, q) in __blk_drain_queue()
447 for (i = 0; i < ARRAY_SIZE(rl->wait); i++) in __blk_drain_queue()
448 wake_up_all(&rl->wait[i]); in __blk_drain_queue()
555 int blk_init_rl(struct request_list *rl, struct request_queue *q, in blk_init_rl() argument
558 if (unlikely(rl->rq_pool)) in blk_init_rl()
561 rl->q = q; in blk_init_rl()
562 rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0; in blk_init_rl()
563 rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0; in blk_init_rl()
564 init_waitqueue_head(&rl->wait[BLK_RW_SYNC]); in blk_init_rl()
[all …]
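
The blk-core.c hits show blk_init_rl() zeroing the per-direction count/starved fields and initializing one wait queue per direction, and __blk_drain_queue() waking every waiter while draining. A rough, self-contained model of those two steps, with pthread condition variables standing in for the kernel's wait_queue_head_t:

    #include <pthread.h>
    #include <stdio.h>

    enum { BLK_RW_SYNC, BLK_RW_ASYNC };

    struct request_list {
        int count[2];
        int starved[2];
        pthread_cond_t wait[2];
    };

    static void init_rl(struct request_list *rl)        /* cf. blk_init_rl() */
    {
        rl->count[BLK_RW_SYNC] = rl->count[BLK_RW_ASYNC] = 0;
        rl->starved[BLK_RW_SYNC] = rl->starved[BLK_RW_ASYNC] = 0;
        pthread_cond_init(&rl->wait[BLK_RW_SYNC], NULL);
        pthread_cond_init(&rl->wait[BLK_RW_ASYNC], NULL);
    }

    static void drain_wake_all(struct request_list *rl) /* cf. __blk_drain_queue() */
    {
        for (int i = 0; i < 2; i++)
            pthread_cond_broadcast(&rl->wait[i]);       /* cf. wake_up_all() */
    }

    int main(void)
    {
        struct request_list rl;
        init_rl(&rl);
        drain_wake_all(&rl);   /* no sleepers here; just exercises the path */
        printf("sync count = %d\n", rl.count[BLK_RW_SYNC]);
        return 0;
    }
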
blk-sysfs.c
49 struct request_list *rl; in queue_requests_store() local
68 rl = &q->root_rl; in queue_requests_store()
70 if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q)) in queue_requests_store()
72 else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q)) in queue_requests_store()
75 if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q)) in queue_requests_store()
77 else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q)) in queue_requests_store()
80 blk_queue_for_each_rl(rl, q) { in queue_requests_store()
81 if (rl->count[BLK_RW_SYNC] >= q->nr_requests) { in queue_requests_store()
82 blk_set_rl_full(rl, BLK_RW_SYNC); in queue_requests_store()
84 blk_clear_rl_full(rl, BLK_RW_SYNC); in queue_requests_store()
[all …]
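
The blk-sysfs.c hits come from queue_requests_store(), which, after nr_requests changes, rechecks each request_list against the congestion on/off thresholds and against nr_requests itself to set or clear the "full" flag. A small model of that threshold logic; the numeric thresholds below are made up for illustration, whereas the kernel derives them from nr_requests via queue_congestion_on/off_threshold().

    #include <stdbool.h>
    #include <stdio.h>

    struct rl_state {
        int  count;      /* in-flight requests in one direction (sync or async) */
        bool congested;
        bool full;
    };

    static void recompute(struct rl_state *rl, int nr_requests,
                          int on_thresh, int off_thresh)
    {
        if (rl->count >= on_thresh)
            rl->congested = true;      /* hysteresis: set above the on threshold */
        else if (rl->count < off_thresh)
            rl->congested = false;     /* clear only below the off threshold */

        if (rl->count >= nr_requests)
            rl->full = true;           /* models blk_set_rl_full() */
        else
            rl->full = false;          /* models blk_clear_rl_full() */
    }

    int main(void)
    {
        struct rl_state sync_rl = { .count = 120 };

        /* e.g. nr_requests = 128, congest on at 113, off below 103 (illustrative) */
        recompute(&sync_rl, 128, 113, 103);
        printf("congested=%d full=%d\n", sync_rl.congested, sync_rl.full);
        return 0;
    }
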
blk-cgroup.c
87 blk_exit_rl(&blkg->rl); in blkg_free()
117 if (blk_init_rl(&blkg->rl, q, gfp_mask)) in blkg_alloc()
119 blkg->rl.blkg = blkg; in blkg_alloc()
426 struct request_list *__blk_queue_next_rl(struct request_list *rl, in __blk_queue_next_rl() argument
436 if (rl == &q->root_rl) { in __blk_queue_next_rl()
442 blkg = container_of(rl, struct blkcg_gq, rl); in __blk_queue_next_rl()
454 return &blkg->rl; in __blk_queue_next_rl()
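
The blk-cgroup.c hits show __blk_queue_next_rl() recovering the owning blkcg_gq from a request_list pointer with container_of() while stepping from q->root_rl through the queue's per-group lists. A self-contained illustration of that container_of() step, using simplified stand-in types (the kernel walks q->blkg_list rather than a single group):

    #include <stddef.h>
    #include <stdio.h>

    /* container_of: recover the enclosing structure from a member pointer. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct request_list {
        int count[2];
    };

    struct group {                 /* stand-in for struct blkcg_gq */
        const char *name;
        struct request_list rl;    /* embedded member, as in blkcg_gq */
    };

    int main(void)
    {
        struct group g = { .name = "test-cgroup" };
        struct request_list *rl = &g.rl;

        /* Given only the request_list pointer, recover the owning group,
         * mirroring container_of(rl, struct blkcg_gq, rl). */
        struct group *owner = container_of(rl, struct group, rl);
        printf("rl belongs to %s\n", owner->name);
        return 0;
    }
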
blk.h
21 int blk_init_rl(struct request_list *rl, struct request_queue *q,
23 void blk_exit_rl(struct request_list *rl);