Lines matching refs:rq — cross-reference hits for the request pointer rq in block/blk-mq.c
162 struct request *rq, unsigned int rw_flags) in blk_mq_rq_ctx_init() argument
167 INIT_LIST_HEAD(&rq->queuelist); in blk_mq_rq_ctx_init()
169 rq->q = q; in blk_mq_rq_ctx_init()
170 rq->mq_ctx = ctx; in blk_mq_rq_ctx_init()
171 rq->cmd_flags |= rw_flags; in blk_mq_rq_ctx_init()
173 rq->cpu = -1; in blk_mq_rq_ctx_init()
174 INIT_HLIST_NODE(&rq->hash); in blk_mq_rq_ctx_init()
175 RB_CLEAR_NODE(&rq->rb_node); in blk_mq_rq_ctx_init()
176 rq->rq_disk = NULL; in blk_mq_rq_ctx_init()
177 rq->part = NULL; in blk_mq_rq_ctx_init()
178 rq->start_time = jiffies; in blk_mq_rq_ctx_init()
180 rq->rl = NULL; in blk_mq_rq_ctx_init()
181 set_start_time_ns(rq); in blk_mq_rq_ctx_init()
182 rq->io_start_time_ns = 0; in blk_mq_rq_ctx_init()
184 rq->nr_phys_segments = 0; in blk_mq_rq_ctx_init()
186 rq->nr_integrity_segments = 0; in blk_mq_rq_ctx_init()
188 rq->special = NULL; in blk_mq_rq_ctx_init()
190 rq->errors = 0; in blk_mq_rq_ctx_init()
192 rq->cmd = rq->__cmd; in blk_mq_rq_ctx_init()
194 rq->extra_len = 0; in blk_mq_rq_ctx_init()
195 rq->sense_len = 0; in blk_mq_rq_ctx_init()
196 rq->resid_len = 0; in blk_mq_rq_ctx_init()
197 rq->sense = NULL; in blk_mq_rq_ctx_init()
199 INIT_LIST_HEAD(&rq->timeout_list); in blk_mq_rq_ctx_init()
200 rq->timeout = 0; in blk_mq_rq_ctx_init()
202 rq->end_io = NULL; in blk_mq_rq_ctx_init()
203 rq->end_io_data = NULL; in blk_mq_rq_ctx_init()
204 rq->next_rq = NULL; in blk_mq_rq_ctx_init()
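Taken together, the hits at 162–204 are the body of blk_mq_rq_ctx_init(), which scrubs a recycled, tag-backed request before it is handed back to a caller. A condensed reconstruction from the matched lines; the CONFIG_BLK_CGROUP / CONFIG_BLK_DEV_INTEGRITY guards are assumptions, and the tag and atomic_flags fields are deliberately left untouched (they are not in the hits):

    /* Sketch: re-initialize a recycled struct request for a new I/O. */
    static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
                                   struct request *rq, unsigned int rw_flags)
    {
        INIT_LIST_HEAD(&rq->queuelist);
        rq->q = q;
        rq->mq_ctx = ctx;
        rq->cmd_flags |= rw_flags;          /* tag and atomic flags are preserved */
        rq->cpu = -1;
        INIT_HLIST_NODE(&rq->hash);
        RB_CLEAR_NODE(&rq->rb_node);
        rq->rq_disk = NULL;
        rq->part = NULL;
        rq->start_time = jiffies;
        rq->rl = NULL;                      /* CONFIG_BLK_CGROUP fields (assumed guard) */
        set_start_time_ns(rq);
        rq->io_start_time_ns = 0;
        rq->nr_phys_segments = 0;
        rq->nr_integrity_segments = 0;      /* CONFIG_BLK_DEV_INTEGRITY (assumed guard) */
        rq->special = NULL;
        rq->errors = 0;
        rq->cmd = rq->__cmd;                /* point at the inline command buffer */
        rq->extra_len = 0;
        rq->sense_len = 0;
        rq->resid_len = 0;
        rq->sense = NULL;
        INIT_LIST_HEAD(&rq->timeout_list);
        rq->timeout = 0;
        rq->end_io = NULL;
        rq->end_io_data = NULL;
        rq->next_rq = NULL;
    }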
212 struct request *rq; in __blk_mq_alloc_request() local
217 rq = data->hctx->tags->rqs[tag]; in __blk_mq_alloc_request()
220 rq->cmd_flags = REQ_MQ_INFLIGHT; in __blk_mq_alloc_request()
224 rq->tag = tag; in __blk_mq_alloc_request()
225 blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw); in __blk_mq_alloc_request()
226 return rq; in __blk_mq_alloc_request()
237 struct request *rq; in blk_mq_alloc_request() local
250 rq = __blk_mq_alloc_request(&alloc_data, rw); in blk_mq_alloc_request()
251 if (!rq && (gfp & __GFP_DIRECT_RECLAIM)) { in blk_mq_alloc_request()
259 rq = __blk_mq_alloc_request(&alloc_data, rw); in blk_mq_alloc_request()
263 if (!rq) { in blk_mq_alloc_request()
267 return rq; in blk_mq_alloc_request()
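The hits at 212–267 cover the allocation path: __blk_mq_alloc_request() turns a driver tag into the preallocated request at tags->rqs[tag] and re-initializes it, while blk_mq_alloc_request() retries once when the gfp mask allows __GFP_DIRECT_RECLAIM, this time being willing to wait for a tag. A rough sketch of the inner helper; blk_mq_get_tag(), BLK_MQ_TAG_FAIL and the nr_active accounting are not in the hits and are filled in as assumptions:

    static struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw)
    {
        struct request *rq;
        unsigned int tag;

        tag = blk_mq_get_tag(data);              /* assumed helper: may block if the caller allows it */
        if (tag != BLK_MQ_TAG_FAIL) {
            rq = data->hctx->tags->rqs[tag];     /* requests are preallocated, one per tag */

            if (blk_mq_tag_busy(data->hctx)) {
                rq->cmd_flags = REQ_MQ_INFLIGHT;
                atomic_inc(&data->hctx->nr_active);  /* assumed: shared-tag accounting */
            }

            rq->tag = tag;
            blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw);
            return rq;
        }
        return NULL;
    }

blk_mq_alloc_request() wraps this: a first non-blocking attempt, then, if that fails and __GFP_DIRECT_RECLAIM is set, a second attempt against a freshly chosen ctx/hctx pair; only if both fail does it return an error to the caller.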
272 struct blk_mq_ctx *ctx, struct request *rq) in __blk_mq_free_request() argument
274 const int tag = rq->tag; in __blk_mq_free_request()
275 struct request_queue *q = rq->q; in __blk_mq_free_request()
277 if (rq->cmd_flags & REQ_MQ_INFLIGHT) in __blk_mq_free_request()
279 rq->cmd_flags = 0; in __blk_mq_free_request()
281 clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags); in __blk_mq_free_request()
286 void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq) in blk_mq_free_hctx_request() argument
288 struct blk_mq_ctx *ctx = rq->mq_ctx; in blk_mq_free_hctx_request()
290 ctx->rq_completed[rq_is_sync(rq)]++; in blk_mq_free_hctx_request()
291 __blk_mq_free_request(hctx, ctx, rq); in blk_mq_free_hctx_request()
296 void blk_mq_free_request(struct request *rq) in blk_mq_free_request() argument
299 struct request_queue *q = rq->q; in blk_mq_free_request()
301 hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu); in blk_mq_free_request()
302 blk_mq_free_hctx_request(hctx, rq); in blk_mq_free_request()
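Lines 272–302 are the teardown path: blk_mq_free_request() maps the request's software context back to a hardware queue, blk_mq_free_hctx_request() bumps the per-ctx completion counter, and __blk_mq_free_request() drops the REQ_MQ_INFLIGHT accounting, clears STARTED and returns the tag. Approximate reconstruction; the blk_mq_put_tag() and blk_queue_exit() calls are assumptions, they do not appear in the hits:

    static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
                                      struct blk_mq_ctx *ctx, struct request *rq)
    {
        const int tag = rq->tag;
        struct request_queue *q = rq->q;

        if (rq->cmd_flags & REQ_MQ_INFLIGHT)
            atomic_dec(&hctx->nr_active);        /* assumed counterpart of the alloc path */
        rq->cmd_flags = 0;

        clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
        blk_mq_put_tag(hctx, tag, &ctx->last_tag);   /* assumed: return the tag to the bitmap */
        blk_queue_exit(q);                           /* assumed: drop the queue usage reference */
    }

    void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
    {
        struct blk_mq_ctx *ctx = rq->mq_ctx;

        ctx->rq_completed[rq_is_sync(rq)]++;
        __blk_mq_free_request(hctx, ctx, rq);
    }

    void blk_mq_free_request(struct request *rq)
    {
        struct blk_mq_hw_ctx *hctx;
        struct request_queue *q = rq->q;

        hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
        blk_mq_free_hctx_request(hctx, rq);
    }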
306 inline void __blk_mq_end_request(struct request *rq, int error) in __blk_mq_end_request() argument
308 blk_account_io_done(rq); in __blk_mq_end_request()
310 if (rq->end_io) { in __blk_mq_end_request()
311 rq->end_io(rq, error); in __blk_mq_end_request()
313 if (unlikely(blk_bidi_rq(rq))) in __blk_mq_end_request()
314 blk_mq_free_request(rq->next_rq); in __blk_mq_end_request()
315 blk_mq_free_request(rq); in __blk_mq_end_request()
320 void blk_mq_end_request(struct request *rq, int error) in blk_mq_end_request() argument
322 if (blk_update_request(rq, error, blk_rq_bytes(rq))) in blk_mq_end_request()
324 __blk_mq_end_request(rq, error); in blk_mq_end_request()
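Lines 306–324: blk_mq_end_request() completes the request for its full byte count via blk_update_request(), then __blk_mq_end_request() does the I/O accounting and either invokes a caller-supplied ->end_io() callback or frees the request (and its bidi partner). Sketch assembled from the hits:

    inline void __blk_mq_end_request(struct request *rq, int error)
    {
        blk_account_io_done(rq);

        if (rq->end_io) {
            rq->end_io(rq, error);               /* e.g. flush machinery or passthrough callers */
        } else {
            if (unlikely(blk_bidi_rq(rq)))
                blk_mq_free_request(rq->next_rq);
            blk_mq_free_request(rq);
        }
    }

    void blk_mq_end_request(struct request *rq, int error)
    {
        /* completing the whole request; a partial completion here would be a bug */
        if (blk_update_request(rq, error, blk_rq_bytes(rq)))
            BUG();
        __blk_mq_end_request(rq, error);
    }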
330 struct request *rq = data; in __blk_mq_complete_request_remote() local
332 rq->q->softirq_done_fn(rq); in __blk_mq_complete_request_remote()
335 static void blk_mq_ipi_complete_request(struct request *rq) in blk_mq_ipi_complete_request() argument
337 struct blk_mq_ctx *ctx = rq->mq_ctx; in blk_mq_ipi_complete_request()
341 if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) { in blk_mq_ipi_complete_request()
342 rq->q->softirq_done_fn(rq); in blk_mq_ipi_complete_request()
347 if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags)) in blk_mq_ipi_complete_request()
351 rq->csd.func = __blk_mq_complete_request_remote; in blk_mq_ipi_complete_request()
352 rq->csd.info = rq; in blk_mq_ipi_complete_request()
353 rq->csd.flags = 0; in blk_mq_ipi_complete_request()
354 smp_call_function_single_async(ctx->cpu, &rq->csd); in blk_mq_ipi_complete_request()
356 rq->q->softirq_done_fn(rq); in blk_mq_ipi_complete_request()
361 static void __blk_mq_complete_request(struct request *rq) in __blk_mq_complete_request() argument
363 struct request_queue *q = rq->q; in __blk_mq_complete_request()
366 blk_mq_end_request(rq, rq->errors); in __blk_mq_complete_request()
368 blk_mq_ipi_complete_request(rq); in __blk_mq_complete_request()
379 void blk_mq_complete_request(struct request *rq, int error) in blk_mq_complete_request() argument
381 struct request_queue *q = rq->q; in blk_mq_complete_request()
385 if (!blk_mark_rq_complete(rq)) { in blk_mq_complete_request()
386 rq->errors = error; in blk_mq_complete_request()
387 __blk_mq_complete_request(rq); in blk_mq_complete_request()
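Lines 330–387 are the completion side. blk_mq_complete_request() records the error and, unless the request was already claimed as complete (a timeout race), calls __blk_mq_complete_request(), which either ends the request directly or bounces the ->softirq_done_fn() call back to the submitting CPU. The IPI decision, reconstructed from the hits; the cache-sharing and cpu_online() checks are assumptions:

    static void blk_mq_ipi_complete_request(struct request *rq)
    {
        struct blk_mq_ctx *ctx = rq->mq_ctx;
        bool shared = false;
        int cpu;

        if (!test_bit(QUEUE_FLAG_SAME_COMP, &rq->q->queue_flags)) {
            rq->q->softirq_done_fn(rq);          /* completion-CPU affinity not requested */
            return;
        }

        cpu = get_cpu();
        if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
            shared = cpus_share_cache(cpu, ctx->cpu);    /* assumed */

        if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
            /* run softirq_done_fn on the CPU that submitted the request */
            rq->csd.func = __blk_mq_complete_request_remote;
            rq->csd.info = rq;
            rq->csd.flags = 0;
            smp_call_function_single_async(ctx->cpu, &rq->csd);
        } else {
            rq->q->softirq_done_fn(rq);
        }
        put_cpu();
    }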
392 int blk_mq_request_started(struct request *rq) in blk_mq_request_started() argument
394 return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags); in blk_mq_request_started()
398 void blk_mq_start_request(struct request *rq) in blk_mq_start_request() argument
400 struct request_queue *q = rq->q; in blk_mq_start_request()
402 trace_block_rq_issue(q, rq); in blk_mq_start_request()
404 rq->resid_len = blk_rq_bytes(rq); in blk_mq_start_request()
405 if (unlikely(blk_bidi_rq(rq))) in blk_mq_start_request()
406 rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq); in blk_mq_start_request()
408 blk_add_timer(rq); in blk_mq_start_request()
422 if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) in blk_mq_start_request()
423 set_bit(REQ_ATOM_STARTED, &rq->atomic_flags); in blk_mq_start_request()
424 if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags)) in blk_mq_start_request()
425 clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags); in blk_mq_start_request()
427 if (q->dma_drain_size && blk_rq_bytes(rq)) { in blk_mq_start_request()
433 rq->nr_phys_segments++; in blk_mq_start_request()
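Lines 392–433: blk_mq_start_request() is what a driver calls from ->queue_rq() before touching the hardware. It traces the issue, primes resid_len, arms the timeout timer, flips the STARTED/COMPLETE atomic flags (COMPLETE can be left over from a requeue racing with a timeout), and reserves an extra segment when a DMA drain buffer is configured. Reconstruction from the hits; the memory barrier between blk_add_timer() and the flag updates is an assumption:

    void blk_mq_start_request(struct request *rq)
    {
        struct request_queue *q = rq->q;

        trace_block_rq_issue(q, rq);

        rq->resid_len = blk_rq_bytes(rq);
        if (unlikely(blk_bidi_rq(rq)))
            rq->next_rq->resid_len = blk_rq_bytes(rq->next_rq);

        blk_add_timer(rq);                   /* sets rq->deadline */

        smp_mb__before_atomic();             /* assumed: make ->deadline visible before the flags */

        if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
            set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
        if (test_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags))
            clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);

        if (q->dma_drain_size && blk_rq_bytes(rq)) {
            /* leave room for the drain buffer appended at dispatch time */
            rq->nr_phys_segments++;
        }
    }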
438 static void __blk_mq_requeue_request(struct request *rq) in __blk_mq_requeue_request() argument
440 struct request_queue *q = rq->q; in __blk_mq_requeue_request()
442 trace_block_rq_requeue(q, rq); in __blk_mq_requeue_request()
444 if (test_and_clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) { in __blk_mq_requeue_request()
445 if (q->dma_drain_size && blk_rq_bytes(rq)) in __blk_mq_requeue_request()
446 rq->nr_phys_segments--; in __blk_mq_requeue_request()
450 void blk_mq_requeue_request(struct request *rq) in blk_mq_requeue_request() argument
452 __blk_mq_requeue_request(rq); in blk_mq_requeue_request()
454 BUG_ON(blk_queued_rq(rq)); in blk_mq_requeue_request()
455 blk_mq_add_to_requeue_list(rq, true); in blk_mq_requeue_request()
464 struct request *rq, *next; in blk_mq_requeue_work() local
471 list_for_each_entry_safe(rq, next, &rq_list, queuelist) { in blk_mq_requeue_work()
472 if (!(rq->cmd_flags & REQ_SOFTBARRIER)) in blk_mq_requeue_work()
475 rq->cmd_flags &= ~REQ_SOFTBARRIER; in blk_mq_requeue_work()
476 list_del_init(&rq->queuelist); in blk_mq_requeue_work()
477 blk_mq_insert_request(rq, true, false, false); in blk_mq_requeue_work()
481 rq = list_entry(rq_list.next, struct request, queuelist); in blk_mq_requeue_work()
482 list_del_init(&rq->queuelist); in blk_mq_requeue_work()
483 blk_mq_insert_request(rq, false, false, false); in blk_mq_requeue_work()
493 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head) in blk_mq_add_to_requeue_list() argument
495 struct request_queue *q = rq->q; in blk_mq_add_to_requeue_list()
502 BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER); in blk_mq_add_to_requeue_list()
506 rq->cmd_flags |= REQ_SOFTBARRIER; in blk_mq_add_to_requeue_list()
507 list_add(&rq->queuelist, &q->requeue_list); in blk_mq_add_to_requeue_list()
509 list_add_tail(&rq->queuelist, &q->requeue_list); in blk_mq_add_to_requeue_list()
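Lines 438–509 implement requeueing. __blk_mq_requeue_request() undoes the start (clears STARTED, drops the drain segment); blk_mq_requeue_request() then parks the request on q->requeue_list via blk_mq_add_to_requeue_list(), which borrows REQ_SOFTBARRIER to remember head insertion; blk_mq_requeue_work() later splices the list off and reinserts the head entries first. A sketch of the list handling; the q->requeue_lock locking and the list_splice_init() are assumptions:

    void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
    {
        struct request_queue *q = rq->q;
        unsigned long flags;

        BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER);

        spin_lock_irqsave(&q->requeue_lock, flags);          /* assumed lock */
        if (at_head) {
            rq->cmd_flags |= REQ_SOFTBARRIER;                /* remember: reinsert at head later */
            list_add(&rq->queuelist, &q->requeue_list);
        } else {
            list_add_tail(&rq->queuelist, &q->requeue_list);
        }
        spin_unlock_irqrestore(&q->requeue_lock, flags);
    }

    /* work item: drain the requeue list back into the software queues */
    static void blk_mq_requeue_work(struct work_struct *work)
    {
        struct request_queue *q = container_of(work, struct request_queue, requeue_work);
        LIST_HEAD(rq_list);
        struct request *rq, *next;
        unsigned long flags;

        spin_lock_irqsave(&q->requeue_lock, flags);
        list_splice_init(&q->requeue_list, &rq_list);
        spin_unlock_irqrestore(&q->requeue_lock, flags);

        /* head-insert the SOFTBARRIER-marked requests first ... */
        list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
            if (!(rq->cmd_flags & REQ_SOFTBARRIER))
                continue;
            rq->cmd_flags &= ~REQ_SOFTBARRIER;
            list_del_init(&rq->queuelist);
            blk_mq_insert_request(rq, true, false, false);
        }

        /* ... then everything else at the tail */
        while (!list_empty(&rq_list)) {
            rq = list_entry(rq_list.next, struct request, queuelist);
            list_del_init(&rq->queuelist);
            blk_mq_insert_request(rq, false, false, false);
        }
    }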
537 struct request *rq; in blk_mq_abort_requeue_list() local
539 rq = list_first_entry(&rq_list, struct request, queuelist); in blk_mq_abort_requeue_list()
540 list_del_init(&rq->queuelist); in blk_mq_abort_requeue_list()
541 rq->errors = -EIO; in blk_mq_abort_requeue_list()
542 blk_mq_end_request(rq, rq->errors); in blk_mq_abort_requeue_list()
595 struct request *rq, void *priv, bool reserved) in blk_mq_check_expired() argument
599 if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) { in blk_mq_check_expired()
604 if (unlikely(blk_queue_dying(rq->q))) { in blk_mq_check_expired()
605 rq->errors = -EIO; in blk_mq_check_expired()
606 blk_mq_end_request(rq, rq->errors); in blk_mq_check_expired()
610 if (rq->cmd_flags & REQ_NO_TIMEOUT) in blk_mq_check_expired()
613 if (time_after_eq(jiffies, rq->deadline)) { in blk_mq_check_expired()
614 if (!blk_mark_rq_complete(rq)) in blk_mq_check_expired()
615 blk_mq_rq_timed_out(rq, reserved); in blk_mq_check_expired()
616 } else if (!data->next_set || time_after(data->next, rq->deadline)) { in blk_mq_check_expired()
617 data->next = rq->deadline; in blk_mq_check_expired()
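Lines 595–617: blk_mq_check_expired() is the per-request callback of the timeout scan. Requests that never started are only killed if the queue is dying; REQ_NO_TIMEOUT requests are skipped; expired requests are claimed and passed to blk_mq_rq_timed_out(); otherwise the scan tracks the earliest upcoming deadline. Reconstruction from the hits; the blk_mq_timeout_data layout is inferred from the data->next / data->next_set uses:

    static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
                                     struct request *rq, void *priv, bool reserved)
    {
        struct blk_mq_timeout_data *data = priv;    /* carries next / next_set */

        if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
            /* never issued: only reap it if the queue is going away */
            if (unlikely(blk_queue_dying(rq->q))) {
                rq->errors = -EIO;
                blk_mq_end_request(rq, rq->errors);
            }
            return;
        }

        if (rq->cmd_flags & REQ_NO_TIMEOUT)
            return;

        if (time_after_eq(jiffies, rq->deadline)) {
            /* expired: claim it against the normal completion path first */
            if (!blk_mark_rq_complete(rq))
                blk_mq_rq_timed_out(rq, reserved);
        } else if (!data->next_set || time_after(data->next, rq->deadline)) {
            data->next = rq->deadline;              /* remember the earliest future deadline */
            data->next_set = 1;
        }
    }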
672 struct request *rq; in blk_mq_attempt_merge() local
675 list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) { in blk_mq_attempt_merge()
681 if (!blk_rq_merge_ok(rq, bio)) in blk_mq_attempt_merge()
684 el_ret = blk_try_merge(rq, bio); in blk_mq_attempt_merge()
686 if (bio_attempt_back_merge(q, rq, bio)) { in blk_mq_attempt_merge()
692 if (bio_attempt_front_merge(q, rq, bio)) { in blk_mq_attempt_merge()
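Lines 672–692: blk_mq_attempt_merge() walks the software queue's rq_list from the tail, checks each candidate with blk_rq_merge_ok() / blk_try_merge(), and tries a back or front merge of the incoming bio. Sketch; the scan cap and the ctx->rq_merged accounting are assumptions:

    static bool blk_mq_attempt_merge(struct request_queue *q,
                                     struct blk_mq_ctx *ctx, struct bio *bio)
    {
        struct request *rq;
        int checked = 8;                             /* assumed: bound the scan */

        list_for_each_entry_reverse(rq, &ctx->rq_list, queuelist) {
            int el_ret;

            if (!checked--)
                break;

            if (!blk_rq_merge_ok(rq, bio))
                continue;

            el_ret = blk_try_merge(rq, bio);
            if (el_ret == ELEVATOR_BACK_MERGE) {
                if (bio_attempt_back_merge(q, rq, bio)) {
                    ctx->rq_merged++;
                    return true;
                }
                break;
            } else if (el_ret == ELEVATOR_FRONT_MERGE) {
                if (bio_attempt_front_merge(q, rq, bio)) {
                    ctx->rq_merged++;
                    return true;
                }
                break;
            }
        }

        return false;
    }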
746 struct request *rq; in __blk_mq_run_hw_queue() local
789 rq = list_first_entry(&rq_list, struct request, queuelist); in __blk_mq_run_hw_queue()
790 list_del_init(&rq->queuelist); in __blk_mq_run_hw_queue()
792 bd.rq = rq; in __blk_mq_run_hw_queue()
802 list_add(&rq->queuelist, &rq_list); in __blk_mq_run_hw_queue()
803 __blk_mq_requeue_request(rq); in __blk_mq_run_hw_queue()
808 rq->errors = -EIO; in __blk_mq_run_hw_queue()
809 blk_mq_end_request(rq, rq->errors); in __blk_mq_run_hw_queue()
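Lines 746–809 are the heart of the dispatch loop in __blk_mq_run_hw_queue(): pop a request off the local list, hand it to the driver through ->queue_rq(), and either keep going (OK), put it back and stop (BUSY), or fail it with -EIO (ERROR). Heavily condensed sketch; the rq_list setup, the bd.list plumbing and the leftover-list handling are assumptions:

    while (!list_empty(&rq_list)) {
        struct blk_mq_queue_data bd;
        int ret;

        rq = list_first_entry(&rq_list, struct request, queuelist);
        list_del_init(&rq->queuelist);

        bd.rq = rq;
        bd.list = NULL;                      /* assumed: no driver-private list here */
        bd.last = list_empty(&rq_list);

        ret = q->mq_ops->queue_rq(hctx, &bd);
        switch (ret) {
        case BLK_MQ_RQ_QUEUE_OK:
            break;
        case BLK_MQ_RQ_QUEUE_BUSY:
            /* device can't take it now: put it back and stop dispatching */
            list_add(&rq->queuelist, &rq_list);
            __blk_mq_requeue_request(rq);
            break;
        default:
        case BLK_MQ_RQ_QUEUE_ERROR:
            rq->errors = -EIO;
            blk_mq_end_request(rq, rq->errors);
            break;
        }

        if (ret == BLK_MQ_RQ_QUEUE_BUSY)
            break;
    }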
994 struct request *rq, in __blk_mq_insert_req_list() argument
997 trace_block_rq_insert(hctx->queue, rq); in __blk_mq_insert_req_list()
1000 list_add(&rq->queuelist, &ctx->rq_list); in __blk_mq_insert_req_list()
1002 list_add_tail(&rq->queuelist, &ctx->rq_list); in __blk_mq_insert_req_list()
1006 struct request *rq, bool at_head) in __blk_mq_insert_request() argument
1008 struct blk_mq_ctx *ctx = rq->mq_ctx; in __blk_mq_insert_request()
1010 __blk_mq_insert_req_list(hctx, ctx, rq, at_head); in __blk_mq_insert_request()
1014 void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue, in blk_mq_insert_request() argument
1017 struct request_queue *q = rq->q; in blk_mq_insert_request()
1019 struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx; in blk_mq_insert_request()
1023 rq->mq_ctx = ctx = current_ctx; in blk_mq_insert_request()
1028 __blk_mq_insert_request(hctx, rq, at_head); in blk_mq_insert_request()
1061 struct request *rq; in blk_mq_insert_requests() local
1063 rq = list_first_entry(list, struct request, queuelist); in blk_mq_insert_requests()
1064 list_del_init(&rq->queuelist); in blk_mq_insert_requests()
1065 rq->mq_ctx = ctx; in blk_mq_insert_requests()
1066 __blk_mq_insert_req_list(hctx, ctx, rq, false); in blk_mq_insert_requests()
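Lines 994–1066 are the insertion helpers: __blk_mq_insert_req_list() adds the request to the software queue's rq_list (head or tail) with a trace event, __blk_mq_insert_request() then marks that ctx as pending on its hardware queue (assumed, not in the hits), and blk_mq_insert_request() / blk_mq_insert_requests() wrap this with ctx lookup and locking. Minimal sketch of the list step; the ctx->lock held by the callers is an assumption:

    static void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
                                         struct blk_mq_ctx *ctx,
                                         struct request *rq, bool at_head)
    {
        trace_block_rq_insert(hctx->queue, rq);

        if (at_head)
            list_add(&rq->queuelist, &ctx->rq_list);
        else
            list_add_tail(&rq->queuelist, &ctx->rq_list);
    }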
1089 struct request *rq; in blk_mq_flush_plug_list() local
1103 rq = list_entry_rq(list.next); in blk_mq_flush_plug_list()
1104 list_del_init(&rq->queuelist); in blk_mq_flush_plug_list()
1105 BUG_ON(!rq->q); in blk_mq_flush_plug_list()
1106 if (rq->mq_ctx != this_ctx) { in blk_mq_flush_plug_list()
1113 this_ctx = rq->mq_ctx; in blk_mq_flush_plug_list()
1114 this_q = rq->q; in blk_mq_flush_plug_list()
1119 list_add_tail(&rq->queuelist, &ctx_list); in blk_mq_flush_plug_list()
1132 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio) in blk_mq_bio_to_request() argument
1134 init_request_from_bio(rq, bio); in blk_mq_bio_to_request()
1136 if (blk_do_io_stat(rq)) in blk_mq_bio_to_request()
1137 blk_account_io_start(rq, 1); in blk_mq_bio_to_request()
1148 struct request *rq, struct bio *bio) in blk_mq_merge_queue_io() argument
1151 blk_mq_bio_to_request(rq, bio); in blk_mq_merge_queue_io()
1154 __blk_mq_insert_request(hctx, rq, false); in blk_mq_merge_queue_io()
1162 blk_mq_bio_to_request(rq, bio); in blk_mq_merge_queue_io()
1167 __blk_mq_free_request(hctx, ctx, rq); in blk_mq_merge_queue_io()
1183 struct request *rq; in blk_mq_map_request() local
1197 rq = __blk_mq_alloc_request(&alloc_data, rw); in blk_mq_map_request()
1198 if (unlikely(!rq)) { in blk_mq_map_request()
1207 rq = __blk_mq_alloc_request(&alloc_data, rw); in blk_mq_map_request()
1215 return rq; in blk_mq_map_request()
1218 static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie) in blk_mq_direct_issue_request() argument
1221 struct request_queue *q = rq->q; in blk_mq_direct_issue_request()
1223 rq->mq_ctx->cpu); in blk_mq_direct_issue_request()
1225 .rq = rq, in blk_mq_direct_issue_request()
1229 blk_qc_t new_cookie = blk_tag_to_qc_t(rq->tag, hctx->queue_num); in blk_mq_direct_issue_request()
1242 __blk_mq_requeue_request(rq); in blk_mq_direct_issue_request()
1246 rq->errors = -EIO; in blk_mq_direct_issue_request()
1247 blk_mq_end_request(rq, rq->errors); in blk_mq_direct_issue_request()
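Lines 1218–1247: blk_mq_direct_issue_request() is the plug-bypass path used by blk_mq_make_request(). It builds a one-off blk_mq_queue_data, calls the driver's ->queue_rq() directly, returns the polling cookie on success, and on failure un-starts the request, either ending it with -EIO or signalling the caller to fall back to normal insertion. Reconstruction from the hits; the return-value conventions are assumptions:

    static int blk_mq_direct_issue_request(struct request *rq, blk_qc_t *cookie)
    {
        struct request_queue *q = rq->q;
        struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
        struct blk_mq_queue_data bd = {
            .rq = rq,
            .list = NULL,
            .last = 1,
        };
        blk_qc_t new_cookie = blk_tag_to_qc_t(rq->tag, hctx->queue_num);
        int ret;

        ret = q->mq_ops->queue_rq(hctx, &bd);
        if (ret == BLK_MQ_RQ_QUEUE_OK) {
            *cookie = new_cookie;                /* handed back for blk_poll() */
            return 0;
        }

        __blk_mq_requeue_request(rq);            /* undo the start done inside ->queue_rq() */

        if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
            *cookie = BLK_QC_T_NONE;
            rq->errors = -EIO;
            blk_mq_end_request(rq, rq->errors);
            return 0;
        }

        return -1;                               /* assumed: BUSY, caller inserts normally */
    }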
1264 struct request *rq; in blk_mq_make_request() local
1283 rq = blk_mq_map_request(q, bio, &data); in blk_mq_make_request()
1284 if (unlikely(!rq)) in blk_mq_make_request()
1287 cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num); in blk_mq_make_request()
1290 blk_mq_bio_to_request(rq, bio); in blk_mq_make_request()
1291 blk_insert_flush(rq); in blk_mq_make_request()
1305 blk_mq_bio_to_request(rq, bio); in blk_mq_make_request()
1322 list_add_tail(&rq->queuelist, &plug->mq_list); in blk_mq_make_request()
1324 old_rq = rq; in blk_mq_make_request()
1334 if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) { in blk_mq_make_request()
1360 struct request *rq; in blk_sq_make_request() local
1378 rq = blk_mq_map_request(q, bio, &data); in blk_sq_make_request()
1379 if (unlikely(!rq)) in blk_sq_make_request()
1382 cookie = blk_tag_to_qc_t(rq->tag, data.hctx->queue_num); in blk_sq_make_request()
1385 blk_mq_bio_to_request(rq, bio); in blk_sq_make_request()
1386 blk_insert_flush(rq); in blk_sq_make_request()
1397 blk_mq_bio_to_request(rq, bio); in blk_sq_make_request()
1408 list_add_tail(&rq->queuelist, &plug->mq_list); in blk_sq_make_request()
1412 if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) { in blk_sq_make_request()
1614 struct request *rq; in blk_mq_hctx_cpu_offline() local
1616 rq = list_first_entry(&tmp, struct request, queuelist); in blk_mq_hctx_cpu_offline()
1617 rq->mq_ctx = ctx; in blk_mq_hctx_cpu_offline()
1618 list_move_tail(&rq->queuelist, &ctx->rq_list); in blk_mq_hctx_cpu_offline()
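Lines 1614–1618: when a CPU goes offline, blk_mq_hctx_cpu_offline() drains the dead CPU's software queue into a temporary list and re-parents each request onto a surviving ctx. The loop, with the surrounding splice and locking assumed:

    /* tmp holds the requests spliced off the offlined CPU's ctx->rq_list (assumed) */
    while (!list_empty(&tmp)) {
        struct request *rq;

        rq = list_first_entry(&tmp, struct request, queuelist);
        rq->mq_ctx = ctx;                        /* re-home onto the surviving software queue */
        list_move_tail(&rq->queuelist, &ctx->rq_list);
    }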