Lines matching refs:next — all hits below are in block/blk-mq.c
585 struct request *rq, *next; in blk_complete_reqs() local
587 llist_for_each_entry_safe(rq, next, entry, ipi_list) in blk_complete_reqs()
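The hunk at 585–587 drains a lock-less list: llist_del_all() atomically detaches every node, and the _safe iterator caches the successor before each loop body runs, so completing (and possibly recycling) the current request cannot break the walk. A minimal sketch of the pattern, using a hypothetical struct item in place of struct request and kfree() in place of the completion call:

    #include <linux/llist.h>
    #include <linux/slab.h>

    struct item {
            struct llist_node node;         /* plays the role of rq->ipi_list */
    };

    static void drain_all(struct llist_head *head)
    {
            struct item *it, *next;
            /* Atomically detach the whole list; no lock needed. */
            struct llist_node *entry = llist_del_all(head);

            /* 'next' is loaded before the body, so disposing of 'it' is safe. */
            llist_for_each_entry_safe(it, next, entry, node)
                    kfree(it);
    }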
779 struct request *rq, *next; in blk_mq_requeue_work() local
785 list_for_each_entry_safe(rq, next, &rq_list, queuelist) { in blk_mq_requeue_work()
804 rq = list_entry(rq_list.next, struct request, queuelist); in blk_mq_requeue_work()
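Lines 779–804 follow the usual deferred-work shape: splice the shared requeue list onto a private list head under the lock, then walk the private copy with the _safe iterator because entries are unlinked as they are re-dispatched; line 804 then pops whatever remains via list_entry() on the head's .next pointer. A sketch of that shape with illustrative names (requeue_sketch; the dispatch step is elided), not the blk-mq code itself:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct item {
            struct list_head queuelist;
    };

    static void requeue_sketch(spinlock_t *lock, struct list_head *shared)
    {
            LIST_HEAD(local);
            struct item *it, *next;

            /* Take everything at once so the lock is held only briefly. */
            spin_lock_irq(lock);
            list_splice_init(shared, &local);
            spin_unlock_irq(lock);

            /* Entries are unlinked inside the loop, hence the _safe form. */
            list_for_each_entry_safe(it, next, &local, queuelist) {
                    list_del_init(&it->queuelist);
                    /* re-insert or dispatch 'it' here */
            }
    }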
907 static bool blk_mq_req_expired(struct request *rq, unsigned long *next) in blk_mq_req_expired() argument
920 if (*next == 0) in blk_mq_req_expired()
921 *next = deadline; in blk_mq_req_expired()
922 else if (time_after(*next, deadline)) in blk_mq_req_expired()
923 *next = deadline; in blk_mq_req_expired()
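blk_mq_req_expired() (907–923) folds each request's deadline into a single earliest-pending-deadline accumulator, with 0 reserved to mean "nothing recorded yet"; time_after(a, b) is the jiffies-wraparound-safe test for "a is later than b". The two assignments at 920–923 collapse to one condition, as in this sketch:

    #include <linux/jiffies.h>

    static void fold_deadline(unsigned long *next, unsigned long deadline)
    {
            /* 0 means no deadline has been recorded yet. */
            if (*next == 0 || time_after(*next, deadline))
                    *next = deadline;
    }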
938 unsigned long *next = priv; in blk_mq_check_expired() local
947 if (blk_mq_req_expired(rq, next)) in blk_mq_check_expired()
956 unsigned long next = 0; in blk_mq_timeout_work() local
976 blk_mq_queue_tag_busy_iter(q, blk_mq_check_expired, &next); in blk_mq_timeout_work()
978 if (next != 0) { in blk_mq_timeout_work()
979 mod_timer(&q->timeout, next); in blk_mq_timeout_work()
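Lines 938–979 show where that accumulator lives and travels: blk_mq_timeout_work() keeps 'next' on its stack, blk_mq_queue_tag_busy_iter() hands it to the per-request callback as an opaque void *priv, and the queue timer is re-armed only if some in-flight request still has a deadline. A hedged sketch of the shape; the iterator and callback signature here are invented, not blk-mq's:

    #include <linux/jiffies.h>
    #include <linux/timer.h>

    /* Illustrative callback: the real blk-mq callback takes a struct request. */
    static bool check_expired_sketch(void *priv, unsigned long deadline)
    {
            unsigned long *next = priv;     /* recover the caller's accumulator */

            if (*next == 0 || time_after(*next, deadline))
                    *next = deadline;
            return true;                    /* keep iterating */
    }

    static void timeout_work_sketch(struct timer_list *t)
    {
            unsigned long next = 0;

            /* for_each_inflight(check_expired_sketch, &next);  -- hypothetical */

            if (next != 0)
                    mod_timer(t, next);     /* fire again at the earliest deadline */
    }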
1045 dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next); in dispatch_rq_from_ctx()
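Line 1045 (and, in the same way, lines 804 and 1932) maps a bare list head back to the request embedding it: head.next is the first node, and list_entry() is container_of() by another name; list_entry_rq() is the blk-mq shorthand for list_entry(ptr, struct request, queuelist). A minimal sketch of the non-destructive head peek, with a hypothetical struct item:

    #include <linux/list.h>

    struct item {
            int payload;
            struct list_head link;
    };

    /*
     * Peek at the first entry without removing it. The caller must know
     * the list is non-empty; otherwise use list_first_entry_or_null().
     */
    static struct item *peek_head(struct list_head *head)
    {
            return list_entry(head->next, struct item, link);
    }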
1253 struct request *next = in blk_mq_handle_dev_resource() local
1260 if (next) in blk_mq_handle_dev_resource()
1261 blk_mq_put_driver_tag(next); in blk_mq_handle_dev_resource()
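In blk_mq_handle_dev_resource() (1253–1261) the dispatch path peeks at the next queued request and, if there is one, returns its driver tag so a scarce tag is not pinned while the device is out of resources. Line 1253 is a declaration continued on the following source line; in mainline kernels of this vintage the initializer is a list_first_entry_or_null() lookup. A sketch of the NULL-safe peek, with illustrative names:

    #include <linux/list.h>

    struct item {
            struct list_head queuelist;
            int tag;                        /* stand-in for the driver tag */
    };

    static void handle_resource_sketch(struct list_head *list)
    {
            /* NULL-safe peek: returns NULL instead of crashing on empty. */
            struct item *next =
                    list_first_entry_or_null(list, struct item, queuelist);

            if (next)
                    next->tag = -1;         /* stand-in for blk_mq_put_driver_tag() */
    }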
1932 struct request *rq, *head_rq = list_entry_rq(list.next); in blk_mq_flush_plug_list()
2613 struct request *rq, *next; in blk_mq_hctx_notify_dead() local
2635 list_for_each_entry_safe(rq, next, &tmp, queuelist) in blk_mq_hctx_notify_dead()
3102 struct blk_mq_hw_ctx *hctx, *next; in blk_mq_release() local
3109 list_for_each_entry_safe(hctx, next, &q->unused_hctx_list, hctx_list) { in blk_mq_release()
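The last two hunks (2613–2635 and 3102–3109) are further uses of the same defensive idiom: when the loop body unlinks and possibly frees the current node, a plain list_for_each_entry() would chase a dangling ->next pointer, so the _safe variant snapshots the successor first. A condensed teardown sketch:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct item {
            struct list_head link;
    };

    static void release_all(struct list_head *head)
    {
            struct item *it, *next;

            /*
             * The plain iterator would read it->link.next after kfree(it);
             * the _safe form reads 'next' before the body runs.
             */
            list_for_each_entry_safe(it, next, head, link) {
                    list_del(&it->link);
                    kfree(it);
            }
    }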