Lines matching refs: rq

51 static int blk_mq_poll_stats_bkt(const struct request *rq)  in blk_mq_poll_stats_bkt()  argument
55 ddir = rq_data_dir(rq); in blk_mq_poll_stats_bkt()
56 sectors = blk_rq_stats_sectors(rq); in blk_mq_poll_stats_bkt()
105 struct request *rq, void *priv, in blk_mq_check_inflight() argument
110 if ((!mi->part->bd_partno || rq->part == mi->part) && in blk_mq_check_inflight()
111 blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT) in blk_mq_check_inflight()
112 mi->inflight[rq_data_dir(rq)]++; in blk_mq_check_inflight()
281 static inline bool blk_mq_need_time_stamp(struct request *rq) in blk_mq_need_time_stamp() argument
283 return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS)) || rq->q->elevator; in blk_mq_need_time_stamp()
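
The fragments above show a completed request being classified by data direction and size for the hybrid-poll statistics, counted as in-flight per direction, and tested for whether a start timestamp is needed at all. Below is a hedged sketch of the bucketing pattern; the listing only shows the rq_data_dir() and blk_rq_stats_sectors() lookups, so the exact formula and the BLK_MQ_POLL_STATS_BKTS cap are assumptions based on the usual upstream layout, not taken from this file.

#include <linux/blk-mq.h>
#include <linux/log2.h>

/* Hedged sketch: one bucket per (direction, power-of-two size) pair,
 * capped at BLK_MQ_POLL_STATS_BKTS. The formula is an assumption. */
static int example_poll_stats_bkt(const struct request *rq)
{
        int ddir = rq_data_dir(rq);              /* 0 = read, 1 = write */
        int sectors = blk_rq_stats_sectors(rq);  /* size in 512-byte sectors */
        int bucket = ddir + 2 * (ilog2(sectors) + 1);

        if (bucket < 0)
                return -1;
        if (bucket >= BLK_MQ_POLL_STATS_BKTS)
                return ddir + BLK_MQ_POLL_STATS_BKTS - 2;
        return bucket;
}
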
290 struct request *rq = tags->static_rqs[tag]; in blk_mq_rq_ctx_init() local
293 rq->tag = BLK_MQ_NO_TAG; in blk_mq_rq_ctx_init()
294 rq->internal_tag = tag; in blk_mq_rq_ctx_init()
296 rq->tag = tag; in blk_mq_rq_ctx_init()
297 rq->internal_tag = BLK_MQ_NO_TAG; in blk_mq_rq_ctx_init()
301 rq->q = data->q; in blk_mq_rq_ctx_init()
302 rq->mq_ctx = data->ctx; in blk_mq_rq_ctx_init()
303 rq->mq_hctx = data->hctx; in blk_mq_rq_ctx_init()
304 rq->rq_flags = 0; in blk_mq_rq_ctx_init()
305 rq->cmd_flags = data->cmd_flags; in blk_mq_rq_ctx_init()
307 rq->rq_flags |= RQF_PM; in blk_mq_rq_ctx_init()
309 rq->rq_flags |= RQF_IO_STAT; in blk_mq_rq_ctx_init()
310 INIT_LIST_HEAD(&rq->queuelist); in blk_mq_rq_ctx_init()
311 INIT_HLIST_NODE(&rq->hash); in blk_mq_rq_ctx_init()
312 RB_CLEAR_NODE(&rq->rb_node); in blk_mq_rq_ctx_init()
313 rq->rq_disk = NULL; in blk_mq_rq_ctx_init()
314 rq->part = NULL; in blk_mq_rq_ctx_init()
316 rq->alloc_time_ns = alloc_time_ns; in blk_mq_rq_ctx_init()
318 if (blk_mq_need_time_stamp(rq)) in blk_mq_rq_ctx_init()
319 rq->start_time_ns = ktime_get_ns(); in blk_mq_rq_ctx_init()
321 rq->start_time_ns = 0; in blk_mq_rq_ctx_init()
322 rq->io_start_time_ns = 0; in blk_mq_rq_ctx_init()
323 rq->stats_sectors = 0; in blk_mq_rq_ctx_init()
324 rq->nr_phys_segments = 0; in blk_mq_rq_ctx_init()
326 rq->nr_integrity_segments = 0; in blk_mq_rq_ctx_init()
328 blk_crypto_rq_set_defaults(rq); in blk_mq_rq_ctx_init()
330 WRITE_ONCE(rq->deadline, 0); in blk_mq_rq_ctx_init()
332 rq->timeout = 0; in blk_mq_rq_ctx_init()
334 rq->end_io = NULL; in blk_mq_rq_ctx_init()
335 rq->end_io_data = NULL; in blk_mq_rq_ctx_init()
338 refcount_set(&rq->ref, 1); in blk_mq_rq_ctx_init()
343 rq->elv.icq = NULL; in blk_mq_rq_ctx_init()
346 blk_mq_sched_assign_ioc(rq); in blk_mq_rq_ctx_init()
348 e->type->ops.prepare_request(rq); in blk_mq_rq_ctx_init()
349 rq->rq_flags |= RQF_ELVPRIV; in blk_mq_rq_ctx_init()
354 trace_android_vh_blk_rq_ctx_init(rq, tags, data, alloc_time_ns); in blk_mq_rq_ctx_init()
355 return rq; in blk_mq_rq_ctx_init()
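
Lines 290-355 are the single place where a request is stamped out of static_rqs[] and fully reinitialized: tags, queue/ctx/hctx back-pointers, flags, timestamps, crypto and elevator state, and an initial reference count of one. The tag handling at lines 293-297 is worth restating in structured form; note that the surrounding data->q->elevator test is inferred from context and is not part of the listing above.

        /* Condensed sketch of the tag assignment in blk_mq_rq_ctx_init():
         * with an I/O scheduler the allocated tag is only a scheduler
         * (internal) tag and the driver tag is assigned at dispatch time;
         * without one the tag is used as the driver tag directly. */
        if (data->q->elevator) {
                rq->tag = BLK_MQ_NO_TAG;
                rq->internal_tag = tag;
        } else {
                rq->tag = tag;
                rq->internal_tag = BLK_MQ_NO_TAG;
        }
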
420 struct request *rq; in blk_mq_alloc_request() local
427 rq = __blk_mq_alloc_request(&data); in blk_mq_alloc_request()
428 if (!rq) in blk_mq_alloc_request()
430 rq->__data_len = 0; in blk_mq_alloc_request()
431 rq->__sector = (sector_t) -1; in blk_mq_alloc_request()
432 rq->bio = rq->biotail = NULL; in blk_mq_alloc_request()
433 return rq; in blk_mq_alloc_request()
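
blk_mq_alloc_request() at lines 420-433 is the non-bio entry point: it pulls a request through __blk_mq_alloc_request() and hands it back with an empty data length, an invalid sector and no bios attached. A hedged caller-side sketch, assuming q is a live request_queue (the exported function returns an ERR_PTR() on failure, unlike the internal NULL check shown above):

#include <linux/blk-mq.h>
#include <linux/err.h>

/* Hedged usage sketch: allocate a passthrough-style request and free it. */
static int example_alloc_and_free(struct request_queue *q)
{
        struct request *rq;

        rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        /* ... fill in driver-private command data here ... */

        blk_mq_free_request(rq);
        return 0;
}
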
502 static void __blk_mq_free_request(struct request *rq) in __blk_mq_free_request() argument
504 struct request_queue *q = rq->q; in __blk_mq_free_request()
505 struct blk_mq_ctx *ctx = rq->mq_ctx; in __blk_mq_free_request()
506 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in __blk_mq_free_request()
507 const int sched_tag = rq->internal_tag; in __blk_mq_free_request()
509 blk_crypto_free_request(rq); in __blk_mq_free_request()
510 blk_pm_mark_last_busy(rq); in __blk_mq_free_request()
511 rq->mq_hctx = NULL; in __blk_mq_free_request()
512 if (rq->tag != BLK_MQ_NO_TAG) in __blk_mq_free_request()
513 blk_mq_put_tag(hctx->tags, ctx, rq->tag); in __blk_mq_free_request()
520 void blk_mq_free_request(struct request *rq) in blk_mq_free_request() argument
522 struct request_queue *q = rq->q; in blk_mq_free_request()
524 struct blk_mq_ctx *ctx = rq->mq_ctx; in blk_mq_free_request()
525 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in blk_mq_free_request()
527 if (rq->rq_flags & RQF_ELVPRIV) { in blk_mq_free_request()
529 e->type->ops.finish_request(rq); in blk_mq_free_request()
530 if (rq->elv.icq) { in blk_mq_free_request()
531 put_io_context(rq->elv.icq->ioc); in blk_mq_free_request()
532 rq->elv.icq = NULL; in blk_mq_free_request()
536 ctx->rq_completed[rq_is_sync(rq)]++; in blk_mq_free_request()
537 if (rq->rq_flags & RQF_MQ_INFLIGHT) in blk_mq_free_request()
540 if (unlikely(laptop_mode && !blk_rq_is_passthrough(rq))) in blk_mq_free_request()
543 rq_qos_done(q, rq); in blk_mq_free_request()
545 WRITE_ONCE(rq->state, MQ_RQ_IDLE); in blk_mq_free_request()
546 if (refcount_dec_and_test(&rq->ref)) in blk_mq_free_request()
547 __blk_mq_free_request(rq); in blk_mq_free_request()
551 inline void __blk_mq_end_request(struct request *rq, blk_status_t error) in __blk_mq_end_request() argument
555 if (blk_mq_need_time_stamp(rq)) in __blk_mq_end_request()
558 if (rq->rq_flags & RQF_STATS) { in __blk_mq_end_request()
559 blk_mq_poll_stats_start(rq->q); in __blk_mq_end_request()
560 blk_stat_add(rq, now); in __blk_mq_end_request()
563 blk_mq_sched_completed_request(rq, now); in __blk_mq_end_request()
565 blk_account_io_done(rq, now); in __blk_mq_end_request()
567 if (rq->end_io) { in __blk_mq_end_request()
568 rq_qos_done(rq->q, rq); in __blk_mq_end_request()
569 rq->end_io(rq, error); in __blk_mq_end_request()
571 blk_mq_free_request(rq); in __blk_mq_end_request()
576 void blk_mq_end_request(struct request *rq, blk_status_t error) in blk_mq_end_request() argument
578 if (blk_update_request(rq, error, blk_rq_bytes(rq))) in blk_mq_end_request()
580 __blk_mq_end_request(rq, error); in blk_mq_end_request()
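
__blk_mq_end_request() (lines 551-571) records completion statistics and then either invokes rq->end_io or frees the request, while blk_mq_end_request() (lines 576-580) first accounts the full byte count via blk_update_request(). For a driver the common case is a single call; a minimal hedged sketch:

#include <linux/blk-mq.h>

/* Hedged sketch: finish a request synchronously with an error status.
 * blk_mq_end_request() updates the residual byte count and then runs
 * the end_io hook or frees the request. */
static void example_fail_request(struct request *rq)
{
        blk_mq_end_request(rq, BLK_STS_IOERR);
}
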
587 struct request *rq, *next; in blk_complete_reqs() local
589 llist_for_each_entry_safe(rq, next, entry, ipi_list) in blk_complete_reqs()
590 rq->q->mq_ops->complete(rq); in blk_complete_reqs()
609 static inline bool blk_mq_complete_need_ipi(struct request *rq) in blk_mq_complete_need_ipi() argument
614 !test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) in blk_mq_complete_need_ipi()
626 if (cpu == rq->mq_ctx->cpu || in blk_mq_complete_need_ipi()
627 (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags) && in blk_mq_complete_need_ipi()
628 cpus_share_cache(cpu, rq->mq_ctx->cpu))) in blk_mq_complete_need_ipi()
632 return cpu_online(rq->mq_ctx->cpu); in blk_mq_complete_need_ipi()
635 static void blk_mq_complete_send_ipi(struct request *rq) in blk_mq_complete_send_ipi() argument
640 cpu = rq->mq_ctx->cpu; in blk_mq_complete_send_ipi()
642 if (llist_add(&rq->ipi_list, list)) { in blk_mq_complete_send_ipi()
643 INIT_CSD(&rq->csd, __blk_mq_complete_request_remote, rq); in blk_mq_complete_send_ipi()
644 smp_call_function_single_async(cpu, &rq->csd); in blk_mq_complete_send_ipi()
648 static void blk_mq_raise_softirq(struct request *rq) in blk_mq_raise_softirq() argument
654 if (llist_add(&rq->ipi_list, list)) in blk_mq_raise_softirq()
659 bool blk_mq_complete_request_remote(struct request *rq) in blk_mq_complete_request_remote() argument
661 WRITE_ONCE(rq->state, MQ_RQ_COMPLETE); in blk_mq_complete_request_remote()
667 if (rq->cmd_flags & REQ_HIPRI) in blk_mq_complete_request_remote()
670 if (blk_mq_complete_need_ipi(rq)) { in blk_mq_complete_request_remote()
671 blk_mq_complete_send_ipi(rq); in blk_mq_complete_request_remote()
675 if (rq->q->nr_hw_queues == 1) { in blk_mq_complete_request_remote()
676 blk_mq_raise_softirq(rq); in blk_mq_complete_request_remote()
690 void blk_mq_complete_request(struct request *rq) in blk_mq_complete_request() argument
692 if (!blk_mq_complete_request_remote(rq)) in blk_mq_complete_request()
693 rq->q->mq_ops->complete(rq); in blk_mq_complete_request()
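
Lines 587-693 implement completion steering: blk_mq_complete_request_remote() marks the request MQ_RQ_COMPLETE and then chooses between an IPI back to the submitting CPU, a softirq on single-hw-queue devices, or local handling, and blk_mq_complete_request() falls back to the driver's ->complete() hook when no remote completion is needed. The usual driver split between hard-IRQ context and that hook looks roughly like this (hedged; the one-request-per-IRQ wiring is a hypothetical simplification):

#include <linux/blk-mq.h>
#include <linux/interrupt.h>

/* Hard-IRQ handler: hand the finished request back to blk-mq, which may
 * bounce it to the submitting CPU before invoking ->complete(). */
static irqreturn_t example_irq_handler(int irq, void *data)
{
        struct request *rq = data;      /* hypothetical: one request per IRQ */

        blk_mq_complete_request(rq);
        return IRQ_HANDLED;
}

/* ->complete() callback from struct blk_mq_ops, run on the chosen CPU. */
static void example_complete_rq(struct request *rq)
{
        blk_mq_end_request(rq, BLK_STS_OK);
}
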
725 void blk_mq_start_request(struct request *rq) in blk_mq_start_request() argument
727 struct request_queue *q = rq->q; in blk_mq_start_request()
729 trace_block_rq_issue(rq); in blk_mq_start_request()
732 rq->io_start_time_ns = ktime_get_ns(); in blk_mq_start_request()
733 rq->stats_sectors = blk_rq_sectors(rq); in blk_mq_start_request()
734 rq->rq_flags |= RQF_STATS; in blk_mq_start_request()
735 rq_qos_issue(q, rq); in blk_mq_start_request()
738 WARN_ON_ONCE(blk_mq_rq_state(rq) != MQ_RQ_IDLE); in blk_mq_start_request()
740 blk_add_timer(rq); in blk_mq_start_request()
741 WRITE_ONCE(rq->state, MQ_RQ_IN_FLIGHT); in blk_mq_start_request()
744 if (blk_integrity_rq(rq) && req_op(rq) == REQ_OP_WRITE) in blk_mq_start_request()
745 q->integrity.profile->prepare_fn(rq); in blk_mq_start_request()
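
blk_mq_start_request() (lines 725-745) is what moves a request to MQ_RQ_IN_FLIGHT: it records the issue timestamp and stats sectors, arms the timeout timer and, for integrity writes, runs the prepare_fn. Drivers therefore call it from ->queue_rq() before touching the hardware. A hedged, deliberately minimal ->queue_rq() that completes inline (a real driver would complete from an interrupt instead):

#include <linux/blk-mq.h>

static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
                                     const struct blk_mq_queue_data *bd)
{
        struct request *rq = bd->rq;

        blk_mq_start_request(rq);       /* timestamp, stats, arm timeout */

        /* ... a real driver would issue rq to hardware here ... */

        blk_mq_end_request(rq, BLK_STS_OK);
        return BLK_STS_OK;
}
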
750 static void __blk_mq_requeue_request(struct request *rq) in __blk_mq_requeue_request() argument
752 struct request_queue *q = rq->q; in __blk_mq_requeue_request()
754 blk_mq_put_driver_tag(rq); in __blk_mq_requeue_request()
756 trace_block_rq_requeue(rq); in __blk_mq_requeue_request()
757 rq_qos_requeue(q, rq); in __blk_mq_requeue_request()
759 if (blk_mq_request_started(rq)) { in __blk_mq_requeue_request()
760 WRITE_ONCE(rq->state, MQ_RQ_IDLE); in __blk_mq_requeue_request()
761 rq->rq_flags &= ~RQF_TIMED_OUT; in __blk_mq_requeue_request()
765 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list) in blk_mq_requeue_request() argument
767 __blk_mq_requeue_request(rq); in blk_mq_requeue_request()
770 blk_mq_sched_requeue_request(rq); in blk_mq_requeue_request()
772 blk_mq_add_to_requeue_list(rq, true, kick_requeue_list); in blk_mq_requeue_request()
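
__blk_mq_requeue_request() (lines 750-761) releases the driver tag and drops a started request back to MQ_RQ_IDLE; the exported blk_mq_requeue_request() (lines 765-772) then notifies the scheduler and puts the request on the requeue list. From a driver, handing back a request it can no longer service is a single call (hedged sketch):

#include <linux/blk-mq.h>

/* Hedged sketch: give a started request back to blk-mq; passing true
 * kicks the requeue list so it is re-dispatched promptly. */
static void example_requeue(struct request *rq)
{
        blk_mq_requeue_request(rq, true);
}
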
781 struct request *rq, *next; in blk_mq_requeue_work() local
787 list_for_each_entry_safe(rq, next, &rq_list, queuelist) { in blk_mq_requeue_work()
788 if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP))) in blk_mq_requeue_work()
791 rq->rq_flags &= ~RQF_SOFTBARRIER; in blk_mq_requeue_work()
792 list_del_init(&rq->queuelist); in blk_mq_requeue_work()
798 if (rq->rq_flags & RQF_DONTPREP) in blk_mq_requeue_work()
799 blk_mq_request_bypass_insert(rq, false, false); in blk_mq_requeue_work()
801 blk_mq_sched_insert_request(rq, true, false, false); in blk_mq_requeue_work()
805 rq = list_entry(rq_list.next, struct request, queuelist); in blk_mq_requeue_work()
806 list_del_init(&rq->queuelist); in blk_mq_requeue_work()
807 blk_mq_sched_insert_request(rq, false, false, false); in blk_mq_requeue_work()
813 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head, in blk_mq_add_to_requeue_list() argument
816 struct request_queue *q = rq->q; in blk_mq_add_to_requeue_list()
823 BUG_ON(rq->rq_flags & RQF_SOFTBARRIER); in blk_mq_add_to_requeue_list()
827 rq->rq_flags |= RQF_SOFTBARRIER; in blk_mq_add_to_requeue_list()
828 list_add(&rq->queuelist, &q->requeue_list); in blk_mq_add_to_requeue_list()
830 list_add_tail(&rq->queuelist, &q->requeue_list); in blk_mq_add_to_requeue_list()
863 static bool blk_mq_rq_inflight(struct blk_mq_hw_ctx *hctx, struct request *rq, in blk_mq_rq_inflight() argument
870 if (blk_mq_request_started(rq) && rq->q == hctx->queue) { in blk_mq_rq_inflight()
904 static bool blk_mq_req_expired(struct request *rq, unsigned long *next) in blk_mq_req_expired() argument
908 if (blk_mq_rq_state(rq) != MQ_RQ_IN_FLIGHT) in blk_mq_req_expired()
910 if (rq->rq_flags & RQF_TIMED_OUT) in blk_mq_req_expired()
913 deadline = READ_ONCE(rq->deadline); in blk_mq_req_expired()
924 void blk_mq_put_rq_ref(struct request *rq) in blk_mq_put_rq_ref() argument
926 if (is_flush_rq(rq)) in blk_mq_put_rq_ref()
927 rq->end_io(rq, 0); in blk_mq_put_rq_ref()
928 else if (refcount_dec_and_test(&rq->ref)) in blk_mq_put_rq_ref()
929 __blk_mq_free_request(rq); in blk_mq_put_rq_ref()
933 struct request *rq, void *priv, bool reserved) in blk_mq_check_expired() argument
944 if (blk_mq_req_expired(rq, next)) in blk_mq_check_expired()
945 blk_mq_rq_timed_out(rq, reserved); in blk_mq_check_expired()
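
The timeout scan (lines 904-945) only expires requests that are still MQ_RQ_IN_FLIGHT, not already flagged RQF_TIMED_OUT, and past their deadline; an expired request ends up in the driver's ->timeout() hook via blk_mq_rq_timed_out(), and blk_mq_put_rq_ref() keeps the request alive across the scan. A hedged sketch of such a hook (this kernel generation still passes the reserved flag; example_hw_still_working() is a hypothetical helper):

#include <linux/blk-mq.h>

static bool example_hw_still_working(struct request *rq);   /* hypothetical */

static enum blk_eh_timer_return example_timeout_rq(struct request *rq,
                                                   bool reserved)
{
        /* If the command is still making progress, re-arm the timer. */
        if (example_hw_still_working(rq))
                return BLK_EH_RESET_TIMER;

        /* Otherwise abort it in the device (not shown), complete it and
         * report the timeout as handled. */
        blk_mq_complete_request(rq);
        return BLK_EH_DONE;
}
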
1029 struct request *rq; member
1042 dispatch_data->rq = list_entry_rq(ctx->rq_lists[type].next); in dispatch_rq_from_ctx()
1043 list_del_init(&dispatch_data->rq->queuelist); in dispatch_rq_from_ctx()
1049 return !dispatch_data->rq; in dispatch_rq_from_ctx()
1058 .rq = NULL, in blk_mq_dequeue_from_ctx()
1064 return data.rq; in blk_mq_dequeue_from_ctx()
1075 static bool __blk_mq_get_driver_tag(struct request *rq) in __blk_mq_get_driver_tag() argument
1077 struct sbitmap_queue *bt = rq->mq_hctx->tags->bitmap_tags; in __blk_mq_get_driver_tag()
1078 unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags; in __blk_mq_get_driver_tag()
1081 blk_mq_tag_busy(rq->mq_hctx); in __blk_mq_get_driver_tag()
1083 if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) { in __blk_mq_get_driver_tag()
1084 bt = rq->mq_hctx->tags->breserved_tags; in __blk_mq_get_driver_tag()
1087 if (!hctx_may_queue(rq->mq_hctx, bt)) in __blk_mq_get_driver_tag()
1095 rq->tag = tag + tag_offset; in __blk_mq_get_driver_tag()
1099 bool blk_mq_get_driver_tag(struct request *rq) in blk_mq_get_driver_tag() argument
1101 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in blk_mq_get_driver_tag()
1103 if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_get_driver_tag(rq)) in blk_mq_get_driver_tag()
1107 !(rq->rq_flags & RQF_MQ_INFLIGHT)) { in blk_mq_get_driver_tag()
1108 rq->rq_flags |= RQF_MQ_INFLIGHT; in blk_mq_get_driver_tag()
1111 hctx->tags->rqs[rq->tag] = rq; in blk_mq_get_driver_tag()
1143 struct request *rq) in blk_mq_mark_tag_wait() argument
1161 return blk_mq_get_driver_tag(rq); in blk_mq_mark_tag_wait()
1203 ret = blk_mq_get_driver_tag(rq); in blk_mq_mark_tag_wait()
1250 static void blk_mq_handle_dev_resource(struct request *rq, in blk_mq_handle_dev_resource() argument
1263 list_add(&rq->queuelist, list); in blk_mq_handle_dev_resource()
1264 __blk_mq_requeue_request(rq); in blk_mq_handle_dev_resource()
1267 static void blk_mq_handle_zone_resource(struct request *rq, in blk_mq_handle_zone_resource() argument
1276 list_add(&rq->queuelist, zone_list); in blk_mq_handle_zone_resource()
1277 __blk_mq_requeue_request(rq); in blk_mq_handle_zone_resource()
1286 static enum prep_dispatch blk_mq_prep_dispatch_rq(struct request *rq, in blk_mq_prep_dispatch_rq() argument
1289 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in blk_mq_prep_dispatch_rq()
1293 budget_token = blk_mq_get_dispatch_budget(rq->q); in blk_mq_prep_dispatch_rq()
1295 blk_mq_put_driver_tag(rq); in blk_mq_prep_dispatch_rq()
1298 blk_mq_set_rq_budget_token(rq, budget_token); in blk_mq_prep_dispatch_rq()
1301 if (!blk_mq_get_driver_tag(rq)) { in blk_mq_prep_dispatch_rq()
1309 if (!blk_mq_mark_tag_wait(hctx, rq)) { in blk_mq_prep_dispatch_rq()
1315 blk_mq_put_dispatch_budget(rq->q, budget_token); in blk_mq_prep_dispatch_rq()
1327 struct request *rq; in blk_mq_release_budgets() local
1329 list_for_each_entry(rq, list, queuelist) { in blk_mq_release_budgets()
1330 int budget_token = blk_mq_get_rq_budget_token(rq); in blk_mq_release_budgets()
1345 struct request *rq, *nxt; in blk_mq_dispatch_rq_list() local
1361 rq = list_first_entry(list, struct request, queuelist); in blk_mq_dispatch_rq_list()
1363 WARN_ON_ONCE(hctx != rq->mq_hctx); in blk_mq_dispatch_rq_list()
1364 prep = blk_mq_prep_dispatch_rq(rq, !nr_budgets); in blk_mq_dispatch_rq_list()
1368 list_del_init(&rq->queuelist); in blk_mq_dispatch_rq_list()
1370 bd.rq = rq; in blk_mq_dispatch_rq_list()
1398 blk_mq_handle_dev_resource(rq, list); in blk_mq_dispatch_rq_list()
1406 blk_mq_handle_zone_resource(rq, &zone_list); in blk_mq_dispatch_rq_list()
1411 blk_mq_end_request(rq, ret); in blk_mq_dispatch_rq_list()
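
blk_mq_dispatch_rq_list() (lines 1345-1411) interprets the driver's return code: BLK_STS_RESOURCE and BLK_STS_DEV_RESOURCE put the request back on the dispatch list via blk_mq_handle_dev_resource(), a zone-resource failure parks it on a local zone list, and any other error fails the request with blk_mq_end_request(). In other words, ->queue_rq() signals "retry later" versus "fatal" purely through its return value; a hedged sketch of the busy path (example_hw_has_free_slot() is hypothetical):

#include <linux/blk-mq.h>

static bool example_hw_has_free_slot(void *hw);              /* hypothetical */

/* Hedged sketch: BLK_STS_DEV_RESOURCE means device resources are
 * exhausted and in-flight completions will re-run the queue, whereas
 * BLK_STS_RESOURCE makes blk-mq re-run the queue after a short delay. */
static blk_status_t example_busy_queue_rq(struct blk_mq_hw_ctx *hctx,
                                          const struct blk_mq_queue_data *bd)
{
        if (!example_hw_has_free_slot(hctx->driver_data))
                return BLK_STS_DEV_RESOURCE;

        blk_mq_start_request(bd->rq);
        /* ... hand bd->rq to the hardware here ... */
        return BLK_STS_OK;
}
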
1849 struct request *rq, in __blk_mq_insert_req_list() argument
1852 struct blk_mq_ctx *ctx = rq->mq_ctx; in __blk_mq_insert_req_list()
1857 trace_block_rq_insert(rq); in __blk_mq_insert_req_list()
1860 list_add(&rq->queuelist, &ctx->rq_lists[type]); in __blk_mq_insert_req_list()
1862 list_add_tail(&rq->queuelist, &ctx->rq_lists[type]); in __blk_mq_insert_req_list()
1865 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, in __blk_mq_insert_request() argument
1868 struct blk_mq_ctx *ctx = rq->mq_ctx; in __blk_mq_insert_request()
1872 __blk_mq_insert_req_list(hctx, rq, at_head); in __blk_mq_insert_request()
1885 void blk_mq_request_bypass_insert(struct request *rq, bool at_head, in blk_mq_request_bypass_insert() argument
1888 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in blk_mq_request_bypass_insert()
1892 list_add(&rq->queuelist, &hctx->dispatch); in blk_mq_request_bypass_insert()
1894 list_add_tail(&rq->queuelist, &hctx->dispatch); in blk_mq_request_bypass_insert()
1905 struct request *rq; in blk_mq_insert_requests() local
1912 list_for_each_entry(rq, list, queuelist) { in blk_mq_insert_requests()
1913 BUG_ON(rq->mq_ctx != ctx); in blk_mq_insert_requests()
1914 trace_block_rq_insert(rq); in blk_mq_insert_requests()
1952 struct request *rq, *head_rq = list_entry_rq(list.next); in blk_mq_flush_plug_list() local
1959 rq = list_entry_rq(pos); in blk_mq_flush_plug_list()
1960 BUG_ON(!rq->q); in blk_mq_flush_plug_list()
1961 if (rq->mq_hctx != this_hctx || rq->mq_ctx != this_ctx) in blk_mq_flush_plug_list()
1973 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio, in blk_mq_bio_to_request() argument
1979 rq->cmd_flags |= REQ_FAILFAST_MASK; in blk_mq_bio_to_request()
1981 rq->__sector = bio->bi_iter.bi_sector; in blk_mq_bio_to_request()
1982 rq->write_hint = bio->bi_write_hint; in blk_mq_bio_to_request()
1983 blk_rq_bio_prep(rq, bio, nr_segs); in blk_mq_bio_to_request()
1986 err = blk_crypto_rq_bio_prep(rq, bio, GFP_NOIO); in blk_mq_bio_to_request()
1989 blk_account_io_start(rq); in blk_mq_bio_to_request()
1993 struct request *rq, in __blk_mq_issue_directly() argument
1996 struct request_queue *q = rq->q; in __blk_mq_issue_directly()
1998 .rq = rq, in __blk_mq_issue_directly()
2004 new_cookie = request_to_qc_t(hctx, rq); in __blk_mq_issue_directly()
2020 __blk_mq_requeue_request(rq); in __blk_mq_issue_directly()
2032 struct request *rq, in __blk_mq_try_issue_directly() argument
2036 struct request_queue *q = rq->q; in __blk_mq_try_issue_directly()
2060 blk_mq_set_rq_budget_token(rq, budget_token); in __blk_mq_try_issue_directly()
2062 if (!blk_mq_get_driver_tag(rq)) { in __blk_mq_try_issue_directly()
2067 return __blk_mq_issue_directly(hctx, rq, cookie, last); in __blk_mq_try_issue_directly()
2072 blk_mq_sched_insert_request(rq, false, run_queue, false); in __blk_mq_try_issue_directly()
2089 struct request *rq, blk_qc_t *cookie) in blk_mq_try_issue_directly() argument
2098 ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true); in blk_mq_try_issue_directly()
2100 blk_mq_request_bypass_insert(rq, false, true); in blk_mq_try_issue_directly()
2102 blk_mq_end_request(rq, ret); in blk_mq_try_issue_directly()
2107 blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last) in blk_mq_request_issue_directly() argument
2112 struct blk_mq_hw_ctx *hctx = rq->mq_hctx; in blk_mq_request_issue_directly()
2115 ret = __blk_mq_try_issue_directly(hctx, rq, &unused_cookie, true, last); in blk_mq_request_issue_directly()
2129 struct request *rq = list_first_entry(list, struct request, in blk_mq_try_issue_list_directly() local
2132 list_del_init(&rq->queuelist); in blk_mq_try_issue_list_directly()
2133 ret = blk_mq_request_issue_directly(rq, list_empty(list)); in blk_mq_try_issue_list_directly()
2138 blk_mq_request_bypass_insert(rq, false, in blk_mq_try_issue_list_directly()
2142 blk_mq_end_request(rq, ret); in blk_mq_try_issue_list_directly()
2157 static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq) in blk_add_rq_to_plug() argument
2159 list_add_tail(&rq->queuelist, &plug->mq_list); in blk_add_rq_to_plug()
2166 if (tmp->q != rq->q) in blk_add_rq_to_plug()
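
blk_add_rq_to_plug() (lines 2157-2166) appends the new request to the per-task plug list, which blk_mq_flush_plug_list() (lines 1952-1961) later sorts by hardware context and hands over in batches. Submitters opt into this batching with blk_start_plug()/blk_finish_plug(); a hedged sketch (example_submit_one_bio() is hypothetical):

#include <linux/blkdev.h>

static void example_submit_one_bio(void);                    /* hypothetical */

/* Hedged sketch: batch several submissions under one plug so blk-mq can
 * flush them to the scheduler/driver as a list instead of one by one. */
static void example_plugged_submit(void)
{
        struct blk_plug plug;

        blk_start_plug(&plug);
        example_submit_one_bio();       /* hypothetical: calls submit_bio() */
        example_submit_one_bio();
        blk_finish_plug(&plug);
}
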
2206 struct request *rq; in blk_mq_submit_bio() local
2234 rq = __blk_mq_alloc_request(&data); in blk_mq_submit_bio()
2235 if (unlikely(!rq)) { in blk_mq_submit_bio()
2244 rq_qos_track(q, rq, bio); in blk_mq_submit_bio()
2246 cookie = request_to_qc_t(data.hctx, rq); in blk_mq_submit_bio()
2248 blk_mq_bio_to_request(rq, bio, nr_segs); in blk_mq_submit_bio()
2250 ret = blk_crypto_rq_get_keyslot(rq); in blk_mq_submit_bio()
2254 blk_mq_free_request(rq); in blk_mq_submit_bio()
2261 blk_insert_flush(rq); in blk_mq_submit_bio()
2264 blk_mq_is_sbitmap_shared(rq->mq_hctx->flags) || in blk_mq_submit_bio()
2287 blk_add_rq_to_plug(plug, rq); in blk_mq_submit_bio()
2290 blk_mq_sched_insert_request(rq, false, true, true); in blk_mq_submit_bio()
2305 blk_add_rq_to_plug(plug, rq); in blk_mq_submit_bio()
2320 blk_mq_try_issue_directly(data.hctx, rq, &cookie); in blk_mq_submit_bio()
2323 blk_mq_sched_insert_request(rq, false, true, true); in blk_mq_submit_bio()
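
blk_mq_submit_bio() (lines 2206-2323) is where a bio becomes a request: __blk_mq_alloc_request(), rq_qos tracking, blk_mq_bio_to_request(), crypto keyslot setup, and then one of several insertion paths (flush, plug, scheduler insert, or direct issue). From the submitter's side the path is entered simply by building and submitting a bio; a hedged sketch assuming this kernel generation's two-argument bio_alloc():

#include <linux/bio.h>
#include <linux/blkdev.h>

static void example_read_end_io(struct bio *bio)
{
        /* Completion status is in bio->bi_status. */
        bio_put(bio);
}

/* Hedged sketch: submit a one-page READ against bdev; the bio reaches
 * blk_mq_submit_bio() underneath. 'page' is assumed to be allocated. */
static void example_submit_read(struct block_device *bdev, struct page *page,
                                sector_t sector)
{
        struct bio *bio = bio_alloc(GFP_NOIO, 1);

        bio_set_dev(bio, bdev);
        bio->bi_iter.bi_sector = sector;
        bio->bi_opf = REQ_OP_READ;
        bio->bi_end_io = example_read_end_io;
        bio_add_page(bio, page, PAGE_SIZE, 0);
        submit_bio(bio);
}
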
2353 struct request *rq = drv_tags->rqs[i]; in blk_mq_clear_rq_mapping() local
2354 unsigned long rq_addr = (unsigned long)rq; in blk_mq_clear_rq_mapping()
2357 WARN_ON_ONCE(refcount_read(&rq->ref) != 0); in blk_mq_clear_rq_mapping()
2358 cmpxchg(&drv_tags->rqs[i], rq, NULL); in blk_mq_clear_rq_mapping()
2382 struct request *rq = tags->static_rqs[i]; in blk_mq_free_rqs() local
2384 if (!rq) in blk_mq_free_rqs()
2386 set->ops->exit_request(set, rq, hctx_idx); in blk_mq_free_rqs()
2452 static int blk_mq_init_request(struct blk_mq_tag_set *set, struct request *rq, in blk_mq_init_request() argument
2458 ret = set->ops->init_request(set, rq, hctx_idx, node); in blk_mq_init_request()
2463 WRITE_ONCE(rq->state, MQ_RQ_IDLE); in blk_mq_init_request()
2526 struct request *rq = p; in blk_mq_alloc_rqs() local
2528 tags->static_rqs[i] = rq; in blk_mq_alloc_rqs()
2529 if (blk_mq_init_request(set, rq, hctx_idx, node)) { in blk_mq_alloc_rqs()
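
blk_mq_alloc_rqs() carves static_rqs[] out of page-sized chunks and runs blk_mq_init_request() on each entry, which calls the driver's ->init_request() before the request is marked MQ_RQ_IDLE. Drivers typically use that hook to prepare the per-request PDU that sits behind struct request; a hedged sketch (struct example_cmd is hypothetical):

#include <linux/blk-mq.h>

struct example_cmd {                    /* hypothetical per-request data */
        void *dma_buf;
};

/* Hedged sketch of ->init_request(): runs once per pre-allocated request
 * when the tag set is created, keeping setup out of the ->queue_rq()
 * hot path. The PDU size is declared via blk_mq_tag_set.cmd_size. */
static int example_init_request(struct blk_mq_tag_set *set, struct request *rq,
                                unsigned int hctx_idx, unsigned int numa_node)
{
        struct example_cmd *cmd = blk_mq_rq_to_pdu(rq);

        cmd->dma_buf = NULL;
        return 0;
}
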
2550 static bool blk_mq_has_request(struct request *rq, void *data, bool reserved) in blk_mq_has_request() argument
2554 if (rq->mq_hctx != iter_data->hctx) in blk_mq_has_request()
3864 struct request *rq) in blk_mq_poll_nsecs() argument
3885 bucket = blk_mq_poll_stats_bkt(rq); in blk_mq_poll_nsecs()
3896 struct request *rq) in blk_mq_poll_hybrid_sleep() argument
3903 if (rq->rq_flags & RQF_MQ_POLL_SLEPT) in blk_mq_poll_hybrid_sleep()
3915 nsecs = blk_mq_poll_nsecs(q, rq); in blk_mq_poll_hybrid_sleep()
3920 rq->rq_flags |= RQF_MQ_POLL_SLEPT; in blk_mq_poll_hybrid_sleep()
3933 if (blk_mq_rq_state(rq) == MQ_RQ_COMPLETE) in blk_mq_poll_hybrid_sleep()
3951 struct request *rq; in blk_mq_poll_hybrid() local
3957 rq = blk_mq_tag_to_rq(hctx->tags, blk_qc_t_to_tag(cookie)); in blk_mq_poll_hybrid()
3959 rq = blk_mq_tag_to_rq(hctx->sched_tags, blk_qc_t_to_tag(cookie)); in blk_mq_poll_hybrid()
3966 if (!rq) in blk_mq_poll_hybrid()
3970 return blk_mq_poll_hybrid_sleep(q, rq); in blk_mq_poll_hybrid()
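
The hybrid-poll code (lines 3864-3970) sleeps for an estimate derived from the per-bucket statistics before spinning, and only ever runs for requests found through the polling cookie. In this kernel generation a polled submission is a REQ_HIPRI bio whose cookie is fed back into blk_poll(); a hedged sketch (the 'done' flag is assumed to be set by the bio's end_io handler):

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hedged sketch: submit a high-priority bio and poll for its completion.
 * Assumes the blk_qc_t-returning submit_bio() of this kernel series. */
static void example_polled_wait(struct request_queue *q, struct bio *bio,
                                bool *done)
{
        blk_qc_t cookie;

        bio->bi_opf |= REQ_HIPRI;
        cookie = submit_bio(bio);

        while (!READ_ONCE(*done))
                blk_poll(q, cookie, true);      /* spin rather than sleep */
}
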
4040 unsigned int blk_mq_rq_cpu(struct request *rq) in blk_mq_rq_cpu() argument
4042 return rq->mq_ctx->cpu; in blk_mq_rq_cpu()