Lines Matching refs:rq
120 void blk_rq_init(struct request_queue *q, struct request *rq) in blk_rq_init() argument
122 memset(rq, 0, sizeof(*rq)); in blk_rq_init()
124 INIT_LIST_HEAD(&rq->queuelist); in blk_rq_init()
125 rq->q = q; in blk_rq_init()
126 rq->__sector = (sector_t) -1; in blk_rq_init()
127 INIT_HLIST_NODE(&rq->hash); in blk_rq_init()
128 RB_CLEAR_NODE(&rq->rb_node); in blk_rq_init()
129 rq->tag = BLK_MQ_NO_TAG; in blk_rq_init()
130 rq->internal_tag = BLK_MQ_NO_TAG; in blk_rq_init()
131 rq->start_time_ns = ktime_get_ns(); in blk_rq_init()
132 rq->part = NULL; in blk_rq_init()
133 blk_crypto_rq_set_defaults(rq); in blk_rq_init()
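
The lines above are effectively the whole body of blk_rq_init(): the request is zeroed and then the few fields whose "empty" value is not zero are restored. Joined back together (braces are filled in here; the comments are added in this sketch and are not in the source) it reads roughly as:

void blk_rq_init(struct request_queue *q, struct request *rq)
{
        /* Start from a clean slate... */
        memset(rq, 0, sizeof(*rq));

        /* ...then restore the fields that must not start out as zero. */
        INIT_LIST_HEAD(&rq->queuelist);         /* not on any queue list yet */
        rq->q = q;
        rq->__sector = (sector_t) -1;           /* "no sector assigned" sentinel */
        INIT_HLIST_NODE(&rq->hash);             /* not in the elevator hash */
        RB_CLEAR_NODE(&rq->rb_node);            /* not in any rb-tree */
        rq->tag = BLK_MQ_NO_TAG;                /* no driver tag yet */
        rq->internal_tag = BLK_MQ_NO_TAG;       /* no scheduler tag yet */
        rq->start_time_ns = ktime_get_ns();     /* accounting start timestamp */
        rq->part = NULL;
        blk_crypto_rq_set_defaults(rq);         /* inline-encryption fields */
}
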
248 static void req_bio_endio(struct request *rq, struct bio *bio, in req_bio_endio() argument
254 if (unlikely(rq->rq_flags & RQF_QUIET)) in req_bio_endio()
259 if (req_op(rq) == REQ_OP_ZONE_APPEND && error == BLK_STS_OK) { in req_bio_endio()
267 bio->bi_iter.bi_sector = rq->__sector; in req_bio_endio()
271 if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ)) in req_bio_endio()
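
Only the rq-referencing lines of req_bio_endio() appear above. A sketch of the surrounding logic, with the non-matching lines filled in from memory of kernels of this vintage (details may differ slightly in your tree):

static void req_bio_endio(struct request *rq, struct bio *bio,
                          unsigned int nbytes, blk_status_t error)
{
        if (error)
                bio->bi_status = error;

        /* A quiet request suppresses error reporting on its bios as well. */
        if (unlikely(rq->rq_flags & RQF_QUIET))
                bio_set_flag(bio, BIO_QUIET);

        bio_advance(bio, nbytes);

        if (req_op(rq) == REQ_OP_ZONE_APPEND && error == BLK_STS_OK) {
                /*
                 * For zone append the written sector is only known at
                 * completion time, so copy it back from the request.
                 * Partial completions cannot be supported: anything left
                 * over in the bio is treated as an error.
                 */
                if (bio->bi_iter.bi_size)
                        bio->bi_status = BLK_STS_IOERR;
                else
                        bio->bi_iter.bi_sector = rq->__sector;
        }

        /* Complete the bio only once fully advanced and not part of a
         * flush sequence (the flush machinery reuses that bio). */
        if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
                bio_endio(bio);
}
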
275 void blk_dump_rq_flags(struct request *rq, char *msg) in blk_dump_rq_flags() argument
278 rq->rq_disk ? rq->rq_disk->disk_name : "?", in blk_dump_rq_flags()
279 (unsigned long long) rq->cmd_flags); in blk_dump_rq_flags()
282 (unsigned long long)blk_rq_pos(rq), in blk_dump_rq_flags()
283 blk_rq_sectors(rq), blk_rq_cur_sectors(rq)); in blk_dump_rq_flags()
285 rq->bio, rq->biotail, blk_rq_bytes(rq)); in blk_dump_rq_flags()
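
blk_dump_rq_flags() is a pure debugging helper: it printk()s the request's flags, position/size and bio chain. The format strings below are recalled from same-era kernels and may not match your tree byte for byte; a caller simply does blk_dump_rq_flags(rq, "some message").

void blk_dump_rq_flags(struct request *rq, char *msg)
{
        /* Which disk (if any) the request targets, plus its cmd_flags. */
        printk(KERN_INFO "%s: dev %s: flags=%llx\n", msg,
                rq->rq_disk ? rq->rq_disk->disk_name : "?",
                (unsigned long long) rq->cmd_flags);

        /* Starting sector, total sectors, and sectors in the current bio. */
        printk(KERN_INFO "sector %llu, nr/cnr %u/%u\n",
               (unsigned long long)blk_rq_pos(rq),
               blk_rq_sectors(rq), blk_rq_cur_sectors(rq));

        /* Head/tail of the bio chain and total payload in bytes. */
        printk(KERN_INFO "bio %p, biotail %p, len %u\n",
               rq->bio, rq->biotail, blk_rq_bytes(rq));
}
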
1159 struct request *rq) in blk_cloned_rq_check_limits() argument
1161 unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq)); in blk_cloned_rq_check_limits()
1163 if (blk_rq_sectors(rq) > max_sectors) { in blk_cloned_rq_check_limits()
1178 __func__, blk_rq_sectors(rq), max_sectors); in blk_cloned_rq_check_limits()
1188 rq->nr_phys_segments = blk_recalc_rq_segments(rq); in blk_cloned_rq_check_limits()
1189 if (rq->nr_phys_segments > queue_max_segments(q)) { in blk_cloned_rq_check_limits()
1191 __func__, rq->nr_phys_segments, queue_max_segments(q)); in blk_cloned_rq_check_limits()
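
blk_cloned_rq_check_limits() revalidates a cloned request against the limits of the queue it is about to be inserted on, since a stacked (lower) queue can have tighter limits than the queue the request was originally built for. A hedged reconstruction, with the lines that do not mention rq filled in from memory:

static blk_status_t blk_cloned_rq_check_limits(struct request_queue *q,
                                               struct request *rq)
{
        unsigned int max_sectors = blk_queue_get_max_sectors(q, req_op(rq));

        if (blk_rq_sectors(rq) > max_sectors) {
                /*
                 * A zeroed limit means the lower device has rejected this
                 * operation type altogether (e.g. Write Same); complete such
                 * requests as "not supported" rather than as I/O errors.
                 */
                if (max_sectors == 0)
                        return BLK_STS_NOTSUPP;

                printk(KERN_ERR "%s: over max size limit. (%u > %u)\n",
                        __func__, blk_rq_sectors(rq), max_sectors);
                return BLK_STS_IOERR;
        }

        /*
         * The lower queue may also allow fewer segments, so recount the
         * physical segments and check against its limit.
         */
        rq->nr_phys_segments = blk_recalc_rq_segments(rq);
        if (rq->nr_phys_segments > queue_max_segments(q)) {
                printk(KERN_ERR "%s: over max segments limit. (%hu > %hu)\n",
                        __func__, rq->nr_phys_segments, queue_max_segments(q));
                return BLK_STS_IOERR;
        }

        return BLK_STS_OK;
}
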
1203 blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq) in blk_insert_cloned_request() argument
1207 ret = blk_cloned_rq_check_limits(q, rq); in blk_insert_cloned_request()
1211 if (rq->rq_disk && in blk_insert_cloned_request()
1212 should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq))) in blk_insert_cloned_request()
1215 if (blk_crypto_insert_cloned_request(rq)) in blk_insert_cloned_request()
1219 blk_account_io_start(rq); in blk_insert_cloned_request()
1226 return blk_mq_request_issue_directly(rq, true); in blk_insert_cloned_request()
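
blk_insert_cloned_request() is the stacking-driver entry point: it takes an already prepared clone, re-checks it against the lower queue, accounts it, and issues it directly to blk-mq, bypassing any scheduler on the lower device. The non-matching lines below (the fault-injection return and the io-stat gate) are filled in from memory and may differ between versions:

blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
{
        blk_status_t ret;

        /* The clone must fit within the lower queue's limits. */
        ret = blk_cloned_rq_check_limits(q, rq);
        if (ret != BLK_STS_OK)
                return ret;

        /* Optional fault-injection hook (CONFIG_FAIL_MAKE_REQUEST). */
        if (rq->rq_disk &&
            should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
                return BLK_STS_IOERR;

        /* Inline-encryption bookkeeping for the cloned request. */
        if (blk_crypto_insert_cloned_request(rq))
                return BLK_STS_IOERR;

        if (blk_queue_io_stat(q))
                blk_account_io_start(rq);

        /*
         * Any I/O scheduler sits on the top (stacking) device, so bypass a
         * potential scheduler on the bottom device and issue directly.
         */
        return blk_mq_request_issue_directly(rq, true);
}
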
1243 unsigned int blk_rq_err_bytes(const struct request *rq) in blk_rq_err_bytes() argument
1245 unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK; in blk_rq_err_bytes()
1249 if (!(rq->rq_flags & RQF_MIXED_MERGE)) in blk_rq_err_bytes()
1250 return blk_rq_bytes(rq); in blk_rq_err_bytes()
1259 for (bio = rq->bio; bio; bio = bio->bi_next) { in blk_rq_err_bytes()
1266 BUG_ON(blk_rq_bytes(rq) && !bytes); in blk_rq_err_bytes()
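
blk_rq_err_bytes() answers "how many bytes at the front of this request may be failed under the current failfast policy?". Unless the request carries RQF_MIXED_MERGE (bios with different failfast flags merged into one request), the answer is simply the whole request. A reconstruction with the non-matching lines filled in from memory:

unsigned int blk_rq_err_bytes(const struct request *rq)
{
        unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
        unsigned int bytes = 0;
        struct bio *bio;

        if (!(rq->rq_flags & RQF_MIXED_MERGE))
                return blk_rq_bytes(rq);

        /*
         * Only bios that are at least as eager to fail as the leading bio
         * (i.e. carry all of its failfast bits) can be failed without a
         * retry; stop at the first bio that is not.
         */
        for (bio = rq->bio; bio; bio = bio->bi_next) {
                if ((bio->bi_opf & ff) != ff)
                        break;
                bytes += bio->bi_iter.bi_size;
        }

        /* The leading bio must always be coverable, or callers could loop. */
        BUG_ON(blk_rq_bytes(rq) && !bytes);
        return bytes;
}
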
1323 void blk_account_io_start(struct request *rq) in blk_account_io_start() argument
1325 if (!blk_do_io_stat(rq)) in blk_account_io_start()
1328 rq->part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq)); in blk_account_io_start()
1331 update_io_ticks(rq->part, jiffies, false); in blk_account_io_start()
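
blk_account_io_start() only does work when I/O accounting is enabled for the request (blk_do_io_stat()): it resolves which partition the starting sector falls on and updates the io_ticks bookkeeping. Roughly, with the stat-locking lines filled in from memory:

void blk_account_io_start(struct request *rq)
{
        if (!blk_do_io_stat(rq))
                return;

        /* Map the starting sector to the partition it lands on. */
        rq->part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));

        part_stat_lock();
        update_io_ticks(rq->part, jiffies, false);      /* io_ticks accounting */
        part_stat_unlock();
}
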
1400 void blk_steal_bios(struct bio_list *list, struct request *rq) in blk_steal_bios() argument
1402 if (rq->bio) { in blk_steal_bios()
1404 list->tail->bi_next = rq->bio; in blk_steal_bios()
1406 list->head = rq->bio; in blk_steal_bios()
1407 list->tail = rq->biotail; in blk_steal_bios()
1409 rq->bio = NULL; in blk_steal_bios()
1410 rq->biotail = NULL; in blk_steal_bios()
1413 rq->__data_len = 0; in blk_steal_bios()
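
blk_steal_bios() moves the entire bio chain off a request and splices it onto the tail of the caller's bio_list, leaving the request empty (and with zero data length) so it can be requeued or freed. Reconstructed, with the two branch lines that do not mention rq filled in:

void blk_steal_bios(struct bio_list *list, struct request *rq)
{
        if (rq->bio) {
                /* Splice the request's bio chain onto the end of "list". */
                if (list->tail)
                        list->tail->bi_next = rq->bio;
                else
                        list->head = rq->bio;
                list->tail = rq->biotail;

                /* The request no longer owns any bios. */
                rq->bio = NULL;
                rq->biotail = NULL;
        }

        rq->__data_len = 0;
}
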
1542 void rq_flush_dcache_pages(struct request *rq) in rq_flush_dcache_pages() argument
1547 rq_for_each_segment(bvec, rq, iter) in rq_flush_dcache_pages()
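
rq_flush_dcache_pages() is only built on architectures where flush_dcache_page() is a real operation (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE); it walks every segment of the request and flushes the data cache for the page backing it. A minimal reconstruction:

void rq_flush_dcache_pages(struct request *rq)
{
        struct req_iterator iter;
        struct bio_vec bvec;

        /* Visit every bio_vec in every bio of the request. */
        rq_for_each_segment(bvec, rq, iter)
                flush_dcache_page(bvec.bv_page);
}
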
1588 void blk_rq_unprep_clone(struct request *rq) in blk_rq_unprep_clone() argument
1592 while ((bio = rq->bio) != NULL) { in blk_rq_unprep_clone()
1593 rq->bio = bio->bi_next; in blk_rq_unprep_clone()
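
blk_rq_unprep_clone() undoes blk_rq_prep_clone(): it pops each cloned bio off the request and drops the reference taken when the bio was cloned. The bio_put() line does not mention rq and so is absent from the listing; it is filled in from memory here:

void blk_rq_unprep_clone(struct request *rq)
{
        struct bio *bio;

        while ((bio = rq->bio) != NULL) {
                rq->bio = bio->bi_next; /* unlink the head of the chain */
                bio_put(bio);           /* drop the clone's reference */
        }
}
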
1617 int blk_rq_prep_clone(struct request *rq, struct request *rq_src, in blk_rq_prep_clone() argument
1635 if (rq->bio) { in blk_rq_prep_clone()
1636 rq->biotail->bi_next = bio; in blk_rq_prep_clone()
1637 rq->biotail = bio; in blk_rq_prep_clone()
1639 rq->bio = rq->biotail = bio; in blk_rq_prep_clone()
1645 rq->__sector = blk_rq_pos(rq_src); in blk_rq_prep_clone()
1646 rq->__data_len = blk_rq_bytes(rq_src); in blk_rq_prep_clone()
1648 rq->rq_flags |= RQF_SPECIAL_PAYLOAD; in blk_rq_prep_clone()
1649 rq->special_vec = rq_src->special_vec; in blk_rq_prep_clone()
1651 rq->nr_phys_segments = rq_src->nr_phys_segments; in blk_rq_prep_clone()
1652 rq->ioprio = rq_src->ioprio; in blk_rq_prep_clone()
1654 if (rq->bio && blk_crypto_rq_bio_prep(rq, rq->bio, gfp_mask) < 0) in blk_rq_prep_clone()
1662 blk_rq_unprep_clone(rq); in blk_rq_prep_clone()
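
Taken together, blk_rq_prep_clone(), blk_insert_cloned_request() and blk_rq_unprep_clone() form the request-stacking API used by drivers such as request-based device-mapper. The sketch below is a hypothetical caller, not code from the listing: my_issue_clone(), my_bio_set and my_clone_end_io are invented names, the flow is modelled loosely on how dm-rq drives these helpers, and the signatures assumed (e.g. blk_get_request()) are those of kernels from the same era as the lines above.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Hypothetical completion hook for the clone (name invented). */
static void my_clone_end_io(struct request *clone, blk_status_t error)
{
        /* A real driver would complete the original request here,
         * then unprep and free the clone. */
}

static blk_status_t my_issue_clone(struct request *rq_src,
                                   struct request_queue *lower_q,
                                   struct bio_set *my_bio_set)
{
        struct request *clone;
        blk_status_t ret;

        /* Allocate a request on the lower queue for the same operation. */
        clone = blk_get_request(lower_q, rq_src->cmd_flags | REQ_NOMERGE,
                                BLK_MQ_REQ_NOWAIT);
        if (IS_ERR(clone))
                return BLK_STS_RESOURCE;

        /* Clone rq_src's bios onto "clone" and copy its attributes
         * (sector, length, segments, ioprio, special payload). */
        if (blk_rq_prep_clone(clone, rq_src, my_bio_set, GFP_ATOMIC,
                              NULL, NULL)) {
                blk_put_request(clone);
                return BLK_STS_RESOURCE;
        }
        clone->end_io = my_clone_end_io;

        /* Re-check limits against lower_q and issue directly to blk-mq. */
        ret = blk_insert_cloned_request(lower_q, clone);
        if (ret != BLK_STS_OK) {
                /* Undo the clone preparation before giving the request back. */
                blk_rq_unprep_clone(clone);
                blk_put_request(clone);
        }
        return ret;
}
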