Lines matching refs: rq (identifier search over block/blk-core.c)
124 void blk_rq_init(struct request_queue *q, struct request *rq) in blk_rq_init() argument
126 memset(rq, 0, sizeof(*rq)); in blk_rq_init()
128 INIT_LIST_HEAD(&rq->queuelist); in blk_rq_init()
129 INIT_LIST_HEAD(&rq->timeout_list); in blk_rq_init()
130 rq->cpu = -1; in blk_rq_init()
131 rq->q = q; in blk_rq_init()
132 rq->__sector = (sector_t) -1; in blk_rq_init()
133 INIT_HLIST_NODE(&rq->hash); in blk_rq_init()
134 RB_CLEAR_NODE(&rq->rb_node); in blk_rq_init()
135 rq->cmd = rq->__cmd; in blk_rq_init()
136 rq->cmd_len = BLK_MAX_CDB; in blk_rq_init()
137 rq->tag = -1; in blk_rq_init()
138 rq->start_time = jiffies; in blk_rq_init()
139 set_start_time_ns(rq); in blk_rq_init()
140 rq->part = NULL; in blk_rq_init()
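
blk_rq_init() is exported for drivers that own a struct request outside the queue's mempool and need to reset it before reuse. A minimal sketch, assuming a hypothetical mydrv_dev with an embedded, driver-owned request:

#include <linux/blkdev.h>

struct mydrv_dev {
        struct request special_rq;      /* driver-owned, reused request */
};

static void mydrv_prepare_special(struct request_queue *q, struct mydrv_dev *dev)
{
        /* zeroes the request and re-initialises queuelist, hash, tag and timestamps */
        blk_rq_init(q, &dev->special_rq);
}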
144 static void req_bio_endio(struct request *rq, struct bio *bio, in req_bio_endio() argument
150 if (unlikely(rq->cmd_flags & REQ_QUIET)) in req_bio_endio()
156 if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ)) in req_bio_endio()
160 void blk_dump_rq_flags(struct request *rq, char *msg) in blk_dump_rq_flags() argument
165 rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type, in blk_dump_rq_flags()
166 (unsigned long long) rq->cmd_flags); in blk_dump_rq_flags()
169 (unsigned long long)blk_rq_pos(rq), in blk_dump_rq_flags()
170 blk_rq_sectors(rq), blk_rq_cur_sectors(rq)); in blk_dump_rq_flags()
172 rq->bio, rq->biotail, blk_rq_bytes(rq)); in blk_dump_rq_flags()
174 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { in blk_dump_rq_flags()
177 printk("%02x ", rq->cmd[bit]); in blk_dump_rq_flags()
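
A sketch of how a driver typically calls blk_dump_rq_flags(): dump the request's type, flags, sector range and (for BLOCK_PC requests) the CDB when it sees something it cannot handle. mydrv_check_rq() is hypothetical:

#include <linux/blkdev.h>

static int mydrv_check_rq(struct request *rq)
{
        if (rq->cmd_type != REQ_TYPE_FS) {
                blk_dump_rq_flags(rq, "mydrv: unsupported request");
                return -EIO;
        }
        return 0;
}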
894 static inline void blk_free_request(struct request_list *rl, struct request *rq) in blk_free_request() argument
896 if (rq->cmd_flags & REQ_ELVPRIV) { in blk_free_request()
897 elv_put_request(rl->q, rq); in blk_free_request()
898 if (rq->elv.icq) in blk_free_request()
899 put_io_context(rq->elv.icq->ioc); in blk_free_request()
902 mempool_free(rq, rl->rq_pool); in blk_free_request()
1068 struct request *rq; in __get_request() local
1142 rq = mempool_alloc(rl->rq_pool, gfp_mask); in __get_request()
1143 if (!rq) in __get_request()
1146 blk_rq_init(q, rq); in __get_request()
1147 blk_rq_set_rl(rq, rl); in __get_request()
1148 rq->cmd_flags = rw_flags | REQ_ALLOCED; in __get_request()
1159 rq->elv.icq = icq; in __get_request()
1160 if (unlikely(elv_set_request(q, rq, bio, gfp_mask))) in __get_request()
1178 return rq; in __get_request()
1190 rq->cmd_flags &= ~REQ_ELVPRIV; in __get_request()
1191 rq->elv.icq = NULL; in __get_request()
1242 struct request *rq; in get_request() local
1246 rq = __get_request(rl, rw_flags, bio, gfp_mask); in get_request()
1247 if (!IS_ERR(rq)) in get_request()
1248 return rq; in get_request()
1252 return rq; in get_request()
1280 struct request *rq; in blk_old_get_request() local
1288 rq = get_request(q, rw, NULL, gfp_mask); in blk_old_get_request()
1289 if (IS_ERR(rq)) in blk_old_get_request()
1293 return rq; in blk_old_get_request()
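
__get_request()/get_request() back the exported allocator; the detail worth noting from the listing is the error convention: blk_get_request() hands back an ERR_PTR() value rather than NULL on failure. A minimal sketch, with mydrv_alloc_rq() being hypothetical:

#include <linux/blkdev.h>
#include <linux/err.h>

static struct request *mydrv_alloc_rq(struct request_queue *q, int rw)
{
        struct request *rq;

        rq = blk_get_request(q, rw, GFP_KERNEL);        /* may sleep with GFP_KERNEL */
        if (IS_ERR(rq))
                return NULL;                            /* e.g. -ENOMEM or a dying queue */
        return rq;
}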
1339 struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask); in blk_make_request() local
1341 if (IS_ERR(rq)) in blk_make_request()
1342 return rq; in blk_make_request()
1344 blk_rq_set_block_pc(rq); in blk_make_request()
1351 ret = blk_rq_append_bio(q, rq, bounce_bio); in blk_make_request()
1353 blk_put_request(rq); in blk_make_request()
1358 return rq; in blk_make_request()
1367 void blk_rq_set_block_pc(struct request *rq) in blk_rq_set_block_pc() argument
1369 rq->cmd_type = REQ_TYPE_BLOCK_PC; in blk_rq_set_block_pc()
1370 rq->__data_len = 0; in blk_rq_set_block_pc()
1371 rq->__sector = (sector_t) -1; in blk_rq_set_block_pc()
1372 rq->bio = rq->biotail = NULL; in blk_rq_set_block_pc()
1373 memset(rq->__cmd, 0, sizeof(rq->__cmd)); in blk_rq_set_block_pc()
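
blk_rq_set_block_pc() exists for the SCSI passthrough path: allocate a request, mark it REQ_TYPE_BLOCK_PC, fill the CDB, and execute it synchronously. A minimal sketch under those assumptions; mydev_test_unit_ready() and the timeout are illustrative only:

#include <linux/blkdev.h>

static int mydev_test_unit_ready(struct request_queue *q, struct gendisk *disk)
{
        struct request *rq;
        int err;

        rq = blk_get_request(q, READ, GFP_KERNEL);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        blk_rq_set_block_pc(rq);        /* REQ_TYPE_BLOCK_PC, __cmd cleared */
        rq->cmd[0] = 0x00;              /* TEST UNIT READY, rest of the 6-byte CDB stays zero */
        rq->cmd_len = 6;
        rq->timeout = 30 * HZ;

        err = blk_execute_rq(q, disk, rq, 0);   /* waits for completion */
        blk_put_request(rq);
        return err;
}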
1387 void blk_requeue_request(struct request_queue *q, struct request *rq) in blk_requeue_request() argument
1389 blk_delete_timer(rq); in blk_requeue_request()
1390 blk_clear_rq_complete(rq); in blk_requeue_request()
1391 trace_block_rq_requeue(q, rq); in blk_requeue_request()
1393 if (rq->cmd_flags & REQ_QUEUED) in blk_requeue_request()
1394 blk_queue_end_tag(q, rq); in blk_requeue_request()
1396 BUG_ON(blk_queued_rq(rq)); in blk_requeue_request()
1398 elv_requeue_request(q, rq); in blk_requeue_request()
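
A minimal sketch of the requeue path as seen from a legacy driver: request_fn is entered with q->queue_lock held, so a fetched request can be pushed back with blk_requeue_request() when resources run out and the queue re-run a little later. mydrv_hw_busy is a hypothetical "controller full" flag:

#include <linux/blkdev.h>

static bool mydrv_hw_busy;

static void mydrv_request_fn(struct request_queue *q)
{
        struct request *rq;

        while ((rq = blk_fetch_request(q)) != NULL) {
                if (mydrv_hw_busy) {
                        blk_requeue_request(q, rq);     /* put it back for a later retry */
                        blk_delay_queue(q, HZ / 100);   /* re-run the queue in ~10ms */
                        return;
                }
                /* hand rq to the hardware here */
        }
}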
1402 static void add_acct_request(struct request_queue *q, struct request *rq, in add_acct_request() argument
1405 blk_account_io_start(rq, true); in add_acct_request()
1406 __elv_add_request(q, rq, where); in add_acct_request()
1453 static void blk_pm_put_request(struct request *rq) in blk_pm_put_request() argument
1455 if (rq->q->dev && !(rq->cmd_flags & REQ_PM) && !--rq->q->nr_pending) in blk_pm_put_request()
1456 pm_runtime_mark_last_busy(rq->q->dev); in blk_pm_put_request()
1459 static inline void blk_pm_put_request(struct request *rq) {} in blk_pm_put_request() argument
1529 void blk_add_request_payload(struct request *rq, struct page *page, in blk_add_request_payload() argument
1532 struct bio *bio = rq->bio; in blk_add_request_payload()
1542 rq->__data_len = rq->resid_len = len; in blk_add_request_payload()
1543 rq->nr_phys_segments = 1; in blk_add_request_payload()
1620 struct request *rq; in blk_attempt_plug_merge() local
1634 list_for_each_entry_reverse(rq, plug_list, queuelist) { in blk_attempt_plug_merge()
1637 if (rq->q == q) { in blk_attempt_plug_merge()
1645 *same_queue_rq = rq; in blk_attempt_plug_merge()
1648 if (rq->q != q || !blk_rq_merge_ok(rq, bio)) in blk_attempt_plug_merge()
1651 el_ret = blk_try_merge(rq, bio); in blk_attempt_plug_merge()
1653 ret = bio_attempt_back_merge(q, rq, bio); in blk_attempt_plug_merge()
1657 ret = bio_attempt_front_merge(q, rq, bio); in blk_attempt_plug_merge()
1669 struct request *rq; in blk_plug_queued_count() local
1682 list_for_each_entry(rq, plug_list, queuelist) { in blk_plug_queued_count()
1683 if (rq->q == q) in blk_plug_queued_count()
2176 struct request *rq) in blk_cloned_rq_check_limits() argument
2178 if (blk_rq_sectors(rq) > blk_queue_get_max_sectors(q, rq->cmd_flags)) { in blk_cloned_rq_check_limits()
2189 blk_recalc_rq_segments(rq); in blk_cloned_rq_check_limits()
2190 if (rq->nr_phys_segments > queue_max_segments(q)) { in blk_cloned_rq_check_limits()
2203 int blk_insert_cloned_request(struct request_queue *q, struct request *rq) in blk_insert_cloned_request() argument
2208 if (blk_cloned_rq_check_limits(q, rq)) in blk_insert_cloned_request()
2211 if (rq->rq_disk && in blk_insert_cloned_request()
2212 should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq))) in blk_insert_cloned_request()
2217 blk_account_io_start(rq, true); in blk_insert_cloned_request()
2218 blk_mq_insert_request(rq, false, true, false); in blk_insert_cloned_request()
2232 BUG_ON(blk_queued_rq(rq)); in blk_insert_cloned_request()
2234 if (rq->cmd_flags & (REQ_FLUSH|REQ_FUA)) in blk_insert_cloned_request()
2237 add_acct_request(q, rq, where); in blk_insert_cloned_request()
2262 unsigned int blk_rq_err_bytes(const struct request *rq) in blk_rq_err_bytes() argument
2264 unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK; in blk_rq_err_bytes()
2268 if (!(rq->cmd_flags & REQ_MIXED_MERGE)) in blk_rq_err_bytes()
2269 return blk_rq_bytes(rq); in blk_rq_err_bytes()
2278 for (bio = rq->bio; bio; bio = bio->bi_next) { in blk_rq_err_bytes()
2285 BUG_ON(blk_rq_bytes(rq) && !bytes); in blk_rq_err_bytes()
2336 struct request *rq) in blk_pm_peek_request() argument
2339 (q->rpm_status != RPM_ACTIVE && !(rq->cmd_flags & REQ_PM)))) in blk_pm_peek_request()
2342 return rq; in blk_pm_peek_request()
2346 struct request *rq) in blk_pm_peek_request() argument
2348 return rq; in blk_pm_peek_request()
2352 void blk_account_io_start(struct request *rq, bool new_io) in blk_account_io_start() argument
2355 int rw = rq_data_dir(rq); in blk_account_io_start()
2358 if (!blk_do_io_stat(rq)) in blk_account_io_start()
2364 part = rq->part; in blk_account_io_start()
2367 part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq)); in blk_account_io_start()
2377 part = &rq->rq_disk->part0; in blk_account_io_start()
2382 rq->part = part; in blk_account_io_start()
2406 struct request *rq; in blk_peek_request() local
2409 while ((rq = __elv_next_request(q)) != NULL) { in blk_peek_request()
2411 rq = blk_pm_peek_request(q, rq); in blk_peek_request()
2412 if (!rq) in blk_peek_request()
2415 if (!(rq->cmd_flags & REQ_STARTED)) { in blk_peek_request()
2421 if (rq->cmd_flags & REQ_SORTED) in blk_peek_request()
2422 elv_activate_rq(q, rq); in blk_peek_request()
2429 rq->cmd_flags |= REQ_STARTED; in blk_peek_request()
2430 trace_block_rq_issue(q, rq); in blk_peek_request()
2433 if (!q->boundary_rq || q->boundary_rq == rq) { in blk_peek_request()
2434 q->end_sector = rq_end_sector(rq); in blk_peek_request()
2438 if (rq->cmd_flags & REQ_DONTPREP) in blk_peek_request()
2441 if (q->dma_drain_size && blk_rq_bytes(rq)) { in blk_peek_request()
2448 rq->nr_phys_segments++; in blk_peek_request()
2454 ret = q->prep_rq_fn(q, rq); in blk_peek_request()
2464 if (q->dma_drain_size && blk_rq_bytes(rq) && in blk_peek_request()
2465 !(rq->cmd_flags & REQ_DONTPREP)) { in blk_peek_request()
2470 --rq->nr_phys_segments; in blk_peek_request()
2473 rq = NULL; in blk_peek_request()
2476 rq->cmd_flags |= REQ_QUIET; in blk_peek_request()
2481 blk_start_request(rq); in blk_peek_request()
2482 __blk_end_request_all(rq, -EIO); in blk_peek_request()
2489 return rq; in blk_peek_request()
2493 void blk_dequeue_request(struct request *rq) in blk_dequeue_request() argument
2495 struct request_queue *q = rq->q; in blk_dequeue_request()
2497 BUG_ON(list_empty(&rq->queuelist)); in blk_dequeue_request()
2498 BUG_ON(ELV_ON_HASH(rq)); in blk_dequeue_request()
2500 list_del_init(&rq->queuelist); in blk_dequeue_request()
2507 if (blk_account_rq(rq)) { in blk_dequeue_request()
2508 q->in_flight[rq_is_sync(rq)]++; in blk_dequeue_request()
2509 set_io_start_time_ns(rq); in blk_dequeue_request()
2561 struct request *rq; in blk_fetch_request() local
2563 rq = blk_peek_request(q); in blk_fetch_request()
2564 if (rq) in blk_fetch_request()
2565 blk_start_request(rq); in blk_fetch_request()
2566 return rq; in blk_fetch_request()
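
blk_peek_request() and blk_start_request() pair up as blk_fetch_request(); the classic single-threaded driver loop below processes each request inside the request_fn itself and completes it with __blk_end_request_all(), which expects q->queue_lock to be held (it is, inside a request_fn). myram_xfer() is a hypothetical data-transfer helper:

#include <linux/blkdev.h>

static int myram_xfer(struct request *rq)
{
        /* copy blk_rq_bytes(rq) bytes starting at sector blk_rq_pos(rq) */
        return 0;
}

static void myram_request_fn(struct request_queue *q)
{
        struct request *rq;

        while ((rq = blk_fetch_request(q)) != NULL) {
                int err;

                if (rq->cmd_type != REQ_TYPE_FS)
                        err = -EIO;
                else
                        err = myram_xfer(rq);

                __blk_end_request_all(rq, err);         /* queue_lock already held here */
        }
}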
2707 static bool blk_update_bidi_request(struct request *rq, int error, in blk_update_bidi_request() argument
2711 if (blk_update_request(rq, error, nr_bytes)) in blk_update_bidi_request()
2715 if (unlikely(blk_bidi_rq(rq)) && in blk_update_bidi_request()
2716 blk_update_request(rq->next_rq, error, bidi_bytes)) in blk_update_bidi_request()
2719 if (blk_queue_add_random(rq->q)) in blk_update_bidi_request()
2720 add_disk_randomness(rq->rq_disk); in blk_update_bidi_request()
2793 static bool blk_end_bidi_request(struct request *rq, int error, in blk_end_bidi_request() argument
2796 struct request_queue *q = rq->q; in blk_end_bidi_request()
2799 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) in blk_end_bidi_request()
2803 blk_finish_request(rq, error); in blk_end_bidi_request()
2824 bool __blk_end_bidi_request(struct request *rq, int error, in __blk_end_bidi_request() argument
2827 if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) in __blk_end_bidi_request()
2830 blk_finish_request(rq, error); in __blk_end_bidi_request()
2849 bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes) in blk_end_request() argument
2851 return blk_end_bidi_request(rq, error, nr_bytes, 0); in blk_end_request()
2863 void blk_end_request_all(struct request *rq, int error) in blk_end_request_all() argument
2868 if (unlikely(blk_bidi_rq(rq))) in blk_end_request_all()
2869 bidi_bytes = blk_rq_bytes(rq->next_rq); in blk_end_request_all()
2871 pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); in blk_end_request_all()
2888 bool blk_end_request_cur(struct request *rq, int error) in blk_end_request_cur() argument
2890 return blk_end_request(rq, error, blk_rq_cur_bytes(rq)); in blk_end_request_cur()
2906 bool blk_end_request_err(struct request *rq, int error) in blk_end_request_err() argument
2909 return blk_end_request(rq, error, blk_rq_err_bytes(rq)); in blk_end_request_err()
2926 bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes) in __blk_end_request() argument
2928 return __blk_end_bidi_request(rq, error, nr_bytes, 0); in __blk_end_request()
2940 void __blk_end_request_all(struct request *rq, int error) in __blk_end_request_all() argument
2945 if (unlikely(blk_bidi_rq(rq))) in __blk_end_request_all()
2946 bidi_bytes = blk_rq_bytes(rq->next_rq); in __blk_end_request_all()
2948 pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); in __blk_end_request_all()
2966 bool __blk_end_request_cur(struct request *rq, int error) in __blk_end_request_cur() argument
2968 return __blk_end_request(rq, error, blk_rq_cur_bytes(rq)); in __blk_end_request_cur()
2985 bool __blk_end_request_err(struct request *rq, int error) in __blk_end_request_err() argument
2988 return __blk_end_request(rq, error, blk_rq_err_bytes(rq)); in __blk_end_request_err()
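
The blk_end_request*() variants take q->queue_lock themselves and may therefore be called from the driver's completion interrupt; the __blk_end_request*() variants assume the caller already holds the lock. A minimal sketch of partial completion from an interrupt handler, with mydrv_irq() and the bytes-per-interrupt value being illustrative only:

#include <linux/blkdev.h>
#include <linux/interrupt.h>

static irqreturn_t mydrv_irq(int irq, void *data)
{
        struct request *rq = data;      /* request the hardware just worked on */
        unsigned int done = 4096;       /* bytes finished in this interrupt */

        if (blk_end_request(rq, 0, done)) {
                /* buffers still pending: program the next chunk of rq */
        } else {
                /* request fully completed and returned to the queue */
        }
        return IRQ_HANDLED;
}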
2992 void blk_rq_bio_prep(struct request_queue *q, struct request *rq, in blk_rq_bio_prep() argument
2996 rq->cmd_flags |= bio->bi_rw & REQ_WRITE; in blk_rq_bio_prep()
2999 rq->nr_phys_segments = bio_phys_segments(q, bio); in blk_rq_bio_prep()
3001 rq->__data_len = bio->bi_iter.bi_size; in blk_rq_bio_prep()
3002 rq->bio = rq->biotail = bio; in blk_rq_bio_prep()
3005 rq->rq_disk = bio->bi_bdev->bd_disk; in blk_rq_bio_prep()
3016 void rq_flush_dcache_pages(struct request *rq) in rq_flush_dcache_pages() argument
3021 rq_for_each_segment(bvec, rq, iter) in rq_flush_dcache_pages()
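
rq_flush_dcache_pages() is built on rq_for_each_segment(), which walks every bio_vec of every bio attached to the request. The same iteration is the usual way for a memory-backed driver to touch the request's data; a sketch, with mydrv_copy_out() being hypothetical:

#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/string.h>

static void mydrv_copy_out(struct request *rq, void *dst)
{
        struct req_iterator iter;
        struct bio_vec bvec;
        char *p = dst;

        rq_for_each_segment(bvec, rq, iter) {
                void *src = kmap_atomic(bvec.bv_page);

                memcpy(p, src + bvec.bv_offset, bvec.bv_len);
                kunmap_atomic(src);
                p += bvec.bv_len;
        }
}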
3062 void blk_rq_unprep_clone(struct request *rq) in blk_rq_unprep_clone() argument
3066 while ((bio = rq->bio) != NULL) { in blk_rq_unprep_clone()
3067 rq->bio = bio->bi_next; in blk_rq_unprep_clone()
3109 int blk_rq_prep_clone(struct request *rq, struct request *rq_src, in blk_rq_prep_clone() argument
3127 if (rq->bio) { in blk_rq_prep_clone()
3128 rq->biotail->bi_next = bio; in blk_rq_prep_clone()
3129 rq->biotail = bio; in blk_rq_prep_clone()
3131 rq->bio = rq->biotail = bio; in blk_rq_prep_clone()
3134 __blk_rq_prep_clone(rq, rq_src); in blk_rq_prep_clone()
3141 blk_rq_unprep_clone(rq); in blk_rq_prep_clone()
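
blk_rq_prep_clone() together with blk_insert_cloned_request() (lines 2203+ above) is the pattern used by request-based stacking drivers in the dm-multipath style: clone the bios of an original request into a new request and feed the clone to the lower queue. A rough sketch only; clone allocation, the bio_set and the error unwinding are simplified and hypothetical:

#include <linux/blkdev.h>

static int mydrv_dispatch_clone(struct request_queue *lower_q,
                                struct request *clone, struct request *orig,
                                struct bio_set *bs)
{
        int ret;

        ret = blk_rq_prep_clone(clone, orig, bs, GFP_ATOMIC, NULL, NULL);
        if (ret)
                return ret;

        ret = blk_insert_cloned_request(lower_q, clone);
        if (ret)
                blk_rq_unprep_clone(clone);     /* drop the cloned bios on failure */
        return ret;
}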
3276 struct request *rq; in blk_flush_plug_list() local
3301 rq = list_entry_rq(list.next); in blk_flush_plug_list()
3302 list_del_init(&rq->queuelist); in blk_flush_plug_list()
3303 BUG_ON(!rq->q); in blk_flush_plug_list()
3304 if (rq->q != q) { in blk_flush_plug_list()
3310 q = rq->q; in blk_flush_plug_list()
3319 __blk_end_request_all(rq, -ENODEV); in blk_flush_plug_list()
3326 if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) in blk_flush_plug_list()
3327 __elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH); in blk_flush_plug_list()
3329 __elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE); in blk_flush_plug_list()
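
blk_flush_plug_list() is the draining half of the plugging API referenced throughout this listing: a submitter batches bios inside a blk_start_plug()/blk_finish_plug() section, and the plugged requests are sorted and issued queue by queue when the plug is flushed (explicitly, on schedule(), or by blk_finish_plug()). A minimal submitter-side sketch; the bios array is hypothetical:

#include <linux/blkdev.h>

static void mydrv_submit_batch(struct bio **bios, int nr)
{
        struct blk_plug plug;
        int i;

        blk_start_plug(&plug);
        for (i = 0; i < nr; i++)
                submit_bio(WRITE, bios[i]);     /* queued on the per-task plug list */
        blk_finish_plug(&plug);                 /* triggers blk_flush_plug_list() */
}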