Searched refs:rq (Results 1 – 25 of 35) sorted by relevance

/block/

blk-flush.c
98 static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq) in blk_flush_policy() argument
102 if (blk_rq_sectors(rq)) in blk_flush_policy()
106 if (rq->cmd_flags & REQ_PREFLUSH) in blk_flush_policy()
109 (rq->cmd_flags & REQ_FUA)) in blk_flush_policy()
115 static unsigned int blk_flush_cur_seq(struct request *rq) in blk_flush_cur_seq() argument
117 return 1 << ffz(rq->flush.seq); in blk_flush_cur_seq()
120 static void blk_flush_restore_request(struct request *rq) in blk_flush_restore_request() argument
127 rq->bio = rq->biotail; in blk_flush_restore_request()
130 rq->rq_flags &= ~RQF_FLUSH_SEQ; in blk_flush_restore_request()
131 rq->end_io = rq->flush.saved_end_io; in blk_flush_restore_request()
[all …]
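
The flush machinery above tracks a request's progress through the flush sequence as a bitmask, and blk_flush_cur_seq() picks the next pending step with 1 << ffz(...), i.e. the lowest bit not yet set. A minimal userspace sketch of that "next step = first zero bit" idiom (the stage names are made up, and ffz_sketch assumes not every bit is already set):

#include <stdio.h>

/* find-first-zero helper in the spirit of the kernel's ffz(); undefined if all bits are set */
static unsigned int ffz_sketch(unsigned long x)
{
    return __builtin_ctzl(~x);
}

/* hypothetical stages, one bit each, completed in ascending bit order */
enum { STAGE_PREFLUSH = 1 << 0, STAGE_DATA = 1 << 1, STAGE_POSTFLUSH = 1 << 2 };

int main(void)
{
    unsigned long seq = 0;                                  /* no stage finished yet */

    seq |= 1UL << ffz_sketch(seq);                          /* completes STAGE_PREFLUSH */
    printf("next stage: %#lx\n", 1UL << ffz_sketch(seq));   /* prints 0x2, i.e. STAGE_DATA */
    return 0;
}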

blk-crypto-internal.h
26 bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio);
52 static inline void blk_crypto_rq_set_defaults(struct request *rq) in blk_crypto_rq_set_defaults() argument
54 rq->crypt_ctx = NULL; in blk_crypto_rq_set_defaults()
55 rq->crypt_keyslot = NULL; in blk_crypto_rq_set_defaults()
58 static inline bool blk_crypto_rq_is_encrypted(struct request *rq) in blk_crypto_rq_is_encrypted() argument
60 return rq->crypt_ctx; in blk_crypto_rq_is_encrypted()
63 static inline bool blk_crypto_rq_has_keyslot(struct request *rq) in blk_crypto_rq_has_keyslot() argument
65 return rq->crypt_keyslot; in blk_crypto_rq_has_keyslot()
70 static inline bool bio_crypt_rq_ctx_compatible(struct request *rq, in bio_crypt_rq_ctx_compatible() argument
94 static inline void blk_crypto_rq_set_defaults(struct request *rq) { } in blk_crypto_rq_set_defaults() argument
[all …]
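
blk-crypto-internal.h also shows the usual header idiom for optional features: real static inline helpers when the feature is configured in, and empty or constant-returning stubs otherwise, so callers never need #ifdefs. A hedged sketch of that pattern with invented names (CONFIG_FEATURE_CRYPTO and struct request_sketch are placeholders, not kernel symbols):

#include <stdbool.h>
#include <stddef.h>

struct request_sketch { void *crypt_ctx; };

#ifdef CONFIG_FEATURE_CRYPTO            /* stand-in for the real Kconfig symbol */
static inline void rq_crypto_set_defaults(struct request_sketch *rq)
{
    rq->crypt_ctx = NULL;
}
static inline bool rq_crypto_is_encrypted(struct request_sketch *rq)
{
    return rq->crypt_ctx != NULL;
}
#else
/* compiled-out stubs: callers build unchanged and the calls fold away */
static inline void rq_crypto_set_defaults(struct request_sketch *rq) { (void)rq; }
static inline bool rq_crypto_is_encrypted(struct request_sketch *rq) { (void)rq; return false; }
#endif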

blk-mq.c
51 static int blk_mq_poll_stats_bkt(const struct request *rq) in blk_mq_poll_stats_bkt() argument
55 ddir = rq_data_dir(rq); in blk_mq_poll_stats_bkt()
56 sectors = blk_rq_stats_sectors(rq); in blk_mq_poll_stats_bkt()
105 struct request *rq, void *priv, in blk_mq_check_inflight() argument
110 if ((!mi->part->bd_partno || rq->part == mi->part) && in blk_mq_check_inflight()
111 blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT) in blk_mq_check_inflight()
112 mi->inflight[rq_data_dir(rq)]++; in blk_mq_check_inflight()
281 static inline bool blk_mq_need_time_stamp(struct request *rq) in blk_mq_need_time_stamp() argument
283 return (rq->rq_flags & (RQF_IO_STAT | RQF_STATS)) || rq->q->elevator; in blk_mq_need_time_stamp()
290 struct request *rq = tags->static_rqs[tag]; in blk_mq_rq_ctx_init() local
[all …]

blk-exec.c
20 static void blk_end_sync_rq(struct request *rq, blk_status_t error) in blk_end_sync_rq() argument
22 struct completion *waiting = rq->end_io_data; in blk_end_sync_rq()
24 rq->end_io_data = (void *)(uintptr_t)error; in blk_end_sync_rq()
47 void blk_execute_rq_nowait(struct gendisk *bd_disk, struct request *rq, in blk_execute_rq_nowait() argument
51 WARN_ON(!blk_rq_is_passthrough(rq)); in blk_execute_rq_nowait()
53 rq->rq_disk = bd_disk; in blk_execute_rq_nowait()
54 rq->end_io = done; in blk_execute_rq_nowait()
56 blk_account_io_start(rq); in blk_execute_rq_nowait()
62 blk_mq_sched_insert_request(rq, at_head, true, false); in blk_execute_rq_nowait()
66 static bool blk_rq_is_poll(struct request *rq) in blk_rq_is_poll() argument
[all …]
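
blk_end_sync_rq() above stashes the completion status in end_io_data and wakes whoever is waiting on the request; the synchronous execution path (not shown in these hits) builds on a struct completion. A pthread-based sketch of that wait-for-async-callback primitive, offered as an illustration rather than the kernel implementation:

#include <pthread.h>
#include <stdbool.h>

/* userspace stand-in for the kernel's struct completion */
struct completion_sketch {
    pthread_mutex_t lock;
    pthread_cond_t cond;
    bool done;
};

static struct completion_sketch rq_done = {
    .lock = PTHREAD_MUTEX_INITIALIZER,
    .cond = PTHREAD_COND_INITIALIZER,
    .done = false,
};

static void complete_sketch(struct completion_sketch *c)            /* the end_io side */
{
    pthread_mutex_lock(&c->lock);
    c->done = true;
    pthread_cond_signal(&c->cond);
    pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion_sketch(struct completion_sketch *c) /* the submitter side */
{
    pthread_mutex_lock(&c->lock);
    while (!c->done)
        pthread_cond_wait(&c->cond, &c->lock);
    pthread_mutex_unlock(&c->lock);
}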

mq-deadline.c
119 deadline_rb_root(struct dd_per_prio *per_prio, struct request *rq) in deadline_rb_root() argument
121 return &per_prio->sort_list[rq_data_dir(rq)]; in deadline_rb_root()
128 static u8 dd_rq_ioclass(struct request *rq) in dd_rq_ioclass() argument
130 return IOPRIO_PRIO_CLASS(req_get_ioprio(rq)); in dd_rq_ioclass()
137 deadline_earlier_request(struct request *rq) in deadline_earlier_request() argument
139 struct rb_node *node = rb_prev(&rq->rb_node); in deadline_earlier_request()
151 deadline_latter_request(struct request *rq) in deadline_latter_request() argument
153 struct rb_node *node = rb_next(&rq->rb_node); in deadline_latter_request()
162 deadline_add_rq_rb(struct dd_per_prio *per_prio, struct request *rq) in deadline_add_rq_rb() argument
164 struct rb_root *root = deadline_rb_root(per_prio, rq); in deadline_add_rq_rb()
[all …]

blk-merge.c
387 unsigned int blk_recalc_rq_segments(struct request *rq) in blk_recalc_rq_segments() argument
394 if (!rq->bio) in blk_recalc_rq_segments()
397 switch (bio_op(rq->bio)) { in blk_recalc_rq_segments()
400 if (queue_max_discard_segments(rq->q) > 1) { in blk_recalc_rq_segments()
401 struct bio *bio = rq->bio; in blk_recalc_rq_segments()
414 rq_for_each_bvec(bv, rq, iter) in blk_recalc_rq_segments()
415 bvec_split_segs(rq->q, &bv, &nr_phys_segs, &nr_sectors, in blk_recalc_rq_segments()
542 int __blk_rq_map_sg(struct request_queue *q, struct request *rq, in __blk_rq_map_sg() argument
547 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) in __blk_rq_map_sg()
548 nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg); in __blk_rq_map_sg()
[all …]

elevator.c
54 #define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq)) argument
60 static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio) in elv_iosched_allow_bio_merge() argument
62 struct request_queue *q = rq->q; in elv_iosched_allow_bio_merge()
66 return e->type->ops.allow_merge(q, rq, bio); in elv_iosched_allow_bio_merge()
74 bool elv_bio_merge_ok(struct request *rq, struct bio *bio) in elv_bio_merge_ok() argument
76 if (!blk_rq_merge_ok(rq, bio)) in elv_bio_merge_ok()
79 if (!elv_iosched_allow_bio_merge(rq, bio)) in elv_bio_merge_ok()
200 static inline void __elv_rqhash_del(struct request *rq) in __elv_rqhash_del() argument
202 hash_del(&rq->hash); in __elv_rqhash_del()
203 rq->rq_flags &= ~RQF_HASHED; in __elv_rqhash_del()
[all …]

blk-core.c
122 void blk_rq_init(struct request_queue *q, struct request *rq) in blk_rq_init() argument
124 memset(rq, 0, sizeof(*rq)); in blk_rq_init()
126 INIT_LIST_HEAD(&rq->queuelist); in blk_rq_init()
127 rq->q = q; in blk_rq_init()
128 rq->__sector = (sector_t) -1; in blk_rq_init()
129 INIT_HLIST_NODE(&rq->hash); in blk_rq_init()
130 RB_CLEAR_NODE(&rq->rb_node); in blk_rq_init()
131 rq->tag = BLK_MQ_NO_TAG; in blk_rq_init()
132 rq->internal_tag = BLK_MQ_NO_TAG; in blk_rq_init()
133 rq->start_time_ns = ktime_get_ns(); in blk_rq_init()
[all …]

blk-mq.h
47 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
52 void blk_mq_put_rq_ref(struct request *rq);
71 void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
73 void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
79 blk_status_t blk_mq_request_issue_directly(struct request *rq, bool last);
209 static inline void blk_mq_set_rq_budget_token(struct request *rq, int token) in blk_mq_set_rq_budget_token() argument
214 if (rq->q->mq_ops->set_rq_budget_token) in blk_mq_set_rq_budget_token()
215 rq->q->mq_ops->set_rq_budget_token(rq, token); in blk_mq_set_rq_budget_token()
218 static inline int blk_mq_get_rq_budget_token(struct request *rq) in blk_mq_get_rq_budget_token() argument
220 if (rq->q->mq_ops->get_rq_budget_token) in blk_mq_get_rq_budget_token()
[all …]
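
The budget-token helpers above follow the optional-hook convention: the ops table pointer is checked before every call, so drivers that do not implement the hook pay nothing. A small sketch of that dispatch pattern (the names below are illustrative, not the real blk_mq_ops layout):

#include <stddef.h>

struct queue_ops_sketch {
    void (*set_budget_token)(void *rq, int token);  /* optional, may be NULL */
    int  (*get_budget_token)(void *rq);             /* optional, may be NULL */
};

static void set_budget_token(const struct queue_ops_sketch *ops, void *rq, int token)
{
    if (ops->set_budget_token)                      /* only drivers that care implement it */
        ops->set_budget_token(rq, token);
}

static int get_budget_token(const struct queue_ops_sketch *ops, void *rq)
{
    if (ops->get_budget_token)
        return ops->get_budget_token(rq);
    return -1;                                      /* sentinel: no budgeting in play */
}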

blk-mq-sched.c
21 void blk_mq_sched_assign_ioc(struct request *rq) in blk_mq_sched_assign_ioc() argument
23 struct request_queue *q = rq->q; in blk_mq_sched_assign_ioc()
44 rq->elv.icq = icq; in blk_mq_sched_assign_ioc()
90 struct request *rq; in blk_mq_dispatch_hctx_list() local
94 list_for_each_entry(rq, rq_list, queuelist) { in blk_mq_dispatch_hctx_list()
95 if (rq->mq_hctx != hctx) { in blk_mq_dispatch_hctx_list()
96 list_cut_before(&hctx_list, rq_list, &rq->queuelist); in blk_mq_dispatch_hctx_list()
133 struct request *rq; in __blk_mq_do_dispatch_sched() local
148 rq = e->type->ops.dispatch_request(hctx); in __blk_mq_do_dispatch_sched()
149 if (!rq) { in __blk_mq_do_dispatch_sched()
[all …]

bsg-lib.c
32 struct request *rq; in bsg_transport_sg_io_fn() local
42 rq = blk_get_request(q, hdr->dout_xfer_len ? in bsg_transport_sg_io_fn()
44 if (IS_ERR(rq)) in bsg_transport_sg_io_fn()
45 return PTR_ERR(rq); in bsg_transport_sg_io_fn()
46 rq->timeout = timeout; in bsg_transport_sg_io_fn()
48 job = blk_mq_rq_to_pdu(rq); in bsg_transport_sg_io_fn()
57 job->bidi_rq = blk_get_request(rq->q, REQ_OP_DRV_IN, 0); in bsg_transport_sg_io_fn()
63 ret = blk_rq_map_user(rq->q, job->bidi_rq, NULL, in bsg_transport_sg_io_fn()
77 ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->dout_xferp), in bsg_transport_sg_io_fn()
80 ret = blk_rq_map_user(rq->q, rq, NULL, uptr64(hdr->din_xferp), in bsg_transport_sg_io_fn()
[all …]

blk-mq-sched.h
10 void blk_mq_sched_assign_ioc(struct request *rq);
16 bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq,
21 void blk_mq_sched_insert_request(struct request *rq, bool at_head,
44 blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq, in blk_mq_sched_allow_merge() argument
50 return e->type->ops.allow_merge(q, rq, bio); in blk_mq_sched_allow_merge()
55 static inline void blk_mq_sched_completed_request(struct request *rq, u64 now) in blk_mq_sched_completed_request() argument
57 struct elevator_queue *e = rq->q->elevator; in blk_mq_sched_completed_request()
60 e->type->ops.completed_request(rq, now); in blk_mq_sched_completed_request()
63 static inline void blk_mq_sched_requeue_request(struct request *rq) in blk_mq_sched_requeue_request() argument
65 struct request_queue *q = rq->q; in blk_mq_sched_requeue_request()
[all …]

blk-map.c
129 static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data, in bio_copy_user_iov() argument
158 bio->bi_opf |= req_op(rq); in bio_copy_user_iov()
190 if (bio_add_pc_page(rq->q, bio, page, bytes, offset) < bytes) { in bio_copy_user_iov()
220 ret = blk_rq_append_bio(rq, bio); in bio_copy_user_iov()
233 static int bio_map_user_iov(struct request *rq, struct iov_iter *iter, in bio_map_user_iov() argument
236 unsigned int max_sectors = queue_max_hw_sectors(rq->q); in bio_map_user_iov()
247 bio->bi_opf |= req_op(rq); in bio_map_user_iov()
263 if (unlikely(offs & queue_dma_alignment(rq->q))) { in bio_map_user_iov()
275 if (!bio_add_hw_page(rq->q, bio, page, n, offs, in bio_map_user_iov()
299 ret = blk_rq_append_bio(rq, bio); in bio_map_user_iov()
[all …]

blk-crypto.c
186 bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio) in bio_crypt_rq_ctx_compatible() argument
188 return bio_crypt_ctx_compatible(rq->crypt_ctx, bio->bi_crypt_context); in bio_crypt_rq_ctx_compatible()
221 blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq) in __blk_crypto_rq_get_keyslot() argument
223 return blk_ksm_get_slot_for_key(rq->q->ksm, rq->crypt_ctx->bc_key, in __blk_crypto_rq_get_keyslot()
224 &rq->crypt_keyslot); in __blk_crypto_rq_get_keyslot()
227 void __blk_crypto_rq_put_keyslot(struct request *rq) in __blk_crypto_rq_put_keyslot() argument
229 blk_ksm_put_slot(rq->crypt_keyslot); in __blk_crypto_rq_put_keyslot()
230 rq->crypt_keyslot = NULL; in __blk_crypto_rq_put_keyslot()
233 void __blk_crypto_free_request(struct request *rq) in __blk_crypto_free_request() argument
236 if (WARN_ON_ONCE(rq->crypt_keyslot)) in __blk_crypto_free_request()
[all …]

blk-rq-qos.h
156 void __rq_qos_done(struct rq_qos *rqos, struct request *rq);
157 void __rq_qos_issue(struct rq_qos *rqos, struct request *rq);
158 void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq);
160 void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
161 void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio);
171 static inline void rq_qos_done(struct request_queue *q, struct request *rq) in rq_qos_done() argument
174 __rq_qos_done(q->rq_qos, rq); in rq_qos_done()
177 static inline void rq_qos_issue(struct request_queue *q, struct request *rq) in rq_qos_issue() argument
180 __rq_qos_issue(q->rq_qos, rq); in rq_qos_issue()
183 static inline void rq_qos_requeue(struct request_queue *q, struct request *rq) in rq_qos_requeue() argument
[all …]

bfq-iosched.c
233 #define BFQ_RQ_SEEKY(bfqd, last_pos, rq) \ argument
234 (get_sdist(last_pos, rq) > \
237 blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT))
378 #define RQ_BIC(rq) icq_to_bic((rq)->elv.priv[0]) argument
379 #define RQ_BFQQ(rq) ((rq)->elv.priv[1]) argument
923 struct request *rq; in bfq_check_fifo() local
930 rq = rq_entry_fifo(bfqq->fifo.next); in bfq_check_fifo()
932 if (rq == last || ktime_get_ns() < rq->fifo_time) in bfq_check_fifo()
935 bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq); in bfq_check_fifo()
936 return rq; in bfq_check_fifo()
[all …]

kyber-iosched.c
529 static int rq_get_domain_token(struct request *rq) in rq_get_domain_token() argument
531 return (long)rq->elv.priv[0]; in rq_get_domain_token()
534 static void rq_set_domain_token(struct request *rq, int token) in rq_set_domain_token() argument
536 rq->elv.priv[0] = (void *)(long)token; in rq_set_domain_token()
540 struct request *rq) in rq_clear_domain_token() argument
545 nr = rq_get_domain_token(rq); in rq_clear_domain_token()
547 sched_domain = kyber_sched_domain(rq->cmd_flags); in rq_clear_domain_token()
549 rq->mq_ctx->cpu); in rq_clear_domain_token()
584 static void kyber_prepare_request(struct request *rq) in kyber_prepare_request() argument
586 rq_set_domain_token(rq, -1); in kyber_prepare_request()
[all …]
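
kyber keeps its per-request domain token in an elevator private slot by casting the integer through (void *)(long), with -1 meaning "no token held". A sketch of that stash-an-int-in-a-pointer-slot idiom (struct rq_sketch is a stand-in, not the kernel struct):

#include <assert.h>

struct rq_sketch { void *priv[2]; };        /* stand-in for rq->elv.priv[] */

static void set_token(struct rq_sketch *rq, int token)
{
    rq->priv[0] = (void *)(long)token;      /* an int round-trips through long and void * here */
}

static int get_token(const struct rq_sketch *rq)
{
    return (int)(long)rq->priv[0];
}

int main(void)
{
    struct rq_sketch rq;

    set_token(&rq, -1);                     /* "no token assigned yet" */
    assert(get_token(&rq) == -1);
    return 0;
}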

blk-mq-tag.c
206 struct request *rq; in blk_mq_find_and_get_req() local
210 rq = tags->rqs[bitnr]; in blk_mq_find_and_get_req()
211 if (!rq || rq->tag != bitnr || !refcount_inc_not_zero(&rq->ref)) in blk_mq_find_and_get_req()
212 rq = NULL; in blk_mq_find_and_get_req()
214 return rq; in blk_mq_find_and_get_req()
223 struct request *rq; in bt_iter() local
232 rq = blk_mq_find_and_get_req(tags, bitnr); in bt_iter()
233 if (!rq) in bt_iter()
236 if (rq->q == hctx->queue && rq->mq_hctx == hctx) in bt_iter()
237 ret = iter_data->fn(hctx, rq, iter_data->data, reserved); in bt_iter()
[all …]
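
blk_mq_find_and_get_req() only hands back a request when refcount_inc_not_zero() succeeds, i.e. it takes a reference only while the object is still live, which is what keeps tag iteration safe against concurrent frees. A simplified userspace sketch of the inc-not-zero idea using C11 atomics (not the kernel's refcount_t, which also saturates):

#include <stdatomic.h>
#include <stdbool.h>

static bool refcount_inc_not_zero_sketch(atomic_int *ref)
{
    int old = atomic_load(ref);

    while (old != 0) {
        /* bump only if nobody dropped the count to zero in the meantime */
        if (atomic_compare_exchange_weak(ref, &old, old + 1))
            return true;
    }
    return false;   /* object is on its way to being freed; caller must not touch it */
}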

blk-wbt.c
34 static inline void wbt_clear_state(struct request *rq) in wbt_clear_state() argument
36 rq->wbt_flags = 0; in wbt_clear_state()
39 static inline enum wbt_flags wbt_flags(struct request *rq) in wbt_flags() argument
41 return rq->wbt_flags; in wbt_flags()
44 static inline bool wbt_is_tracked(struct request *rq) in wbt_is_tracked() argument
46 return rq->wbt_flags & WBT_TRACKED; in wbt_is_tracked()
49 static inline bool wbt_is_read(struct request *rq) in wbt_is_read() argument
51 return rq->wbt_flags & WBT_READ; in wbt_is_read()
186 static void wbt_done(struct rq_qos *rqos, struct request *rq) in wbt_done() argument
190 if (!wbt_is_tracked(rq)) { in wbt_done()
[all …]

blk-zoned.c
58 bool blk_req_needs_zone_write_lock(struct request *rq) in blk_req_needs_zone_write_lock() argument
60 if (!rq->q->seq_zones_wlock) in blk_req_needs_zone_write_lock()
63 if (blk_rq_is_passthrough(rq)) in blk_req_needs_zone_write_lock()
66 switch (req_op(rq)) { in blk_req_needs_zone_write_lock()
70 return blk_rq_zone_is_seq(rq); in blk_req_needs_zone_write_lock()
77 bool blk_req_zone_write_trylock(struct request *rq) in blk_req_zone_write_trylock() argument
79 unsigned int zno = blk_rq_zone_no(rq); in blk_req_zone_write_trylock()
81 if (test_and_set_bit(zno, rq->q->seq_zones_wlock)) in blk_req_zone_write_trylock()
84 WARN_ON_ONCE(rq->rq_flags & RQF_ZONE_WRITE_LOCKED); in blk_req_zone_write_trylock()
85 rq->rq_flags |= RQF_ZONE_WRITE_LOCKED; in blk_req_zone_write_trylock()
[all …]
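
blk_req_zone_write_trylock() serializes writes per sequential zone with test_and_set_bit() on a per-queue bitmap, so the bit itself is the lock. A sketch of that bitmap-as-trylock idiom with C11 atomics (the 64-zone limit and the names are assumptions of the sketch):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_ulong zone_wlock_sketch;      /* one bit per zone, up to 64 zones here */

static bool zone_write_trylock_sketch(unsigned int zno)
{
    unsigned long bit = 1UL << zno;

    /* fetch_or returns the previous word: if the bit was already set, someone holds the lock */
    return !(atomic_fetch_or(&zone_wlock_sketch, bit) & bit);
}

static void zone_write_unlock_sketch(unsigned int zno)
{
    atomic_fetch_and(&zone_wlock_sketch, ~(1UL << zno));
}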

blk.h
136 static inline bool blk_integrity_merge_rq(struct request_queue *rq, in blk_integrity_merge_rq() argument
141 static inline bool blk_integrity_merge_bio(struct request_queue *rq, in blk_integrity_merge_bio() argument
196 #define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED) argument
198 void blk_insert_flush(struct request *rq);
232 bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
234 unsigned int blk_recalc_rq_segments(struct request *rq);
235 void blk_rq_set_mixed_merge(struct request *rq);
236 bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
237 enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);
247 static inline bool blk_do_io_stat(struct request *rq) in blk_do_io_stat() argument
[all …]

t10-pi.c
133 static void t10_pi_type1_prepare(struct request *rq) in t10_pi_type1_prepare() argument
135 const int tuple_sz = rq->q->integrity.tuple_size; in t10_pi_type1_prepare()
136 u32 ref_tag = t10_pi_ref_tag(rq); in t10_pi_type1_prepare()
139 __rq_for_each_bio(bio, rq) { in t10_pi_type1_prepare()
182 static void t10_pi_type1_complete(struct request *rq, unsigned int nr_bytes) in t10_pi_type1_complete() argument
184 unsigned intervals = nr_bytes >> rq->q->integrity.interval_exp; in t10_pi_type1_complete()
185 const int tuple_sz = rq->q->integrity.tuple_size; in t10_pi_type1_complete()
186 u32 ref_tag = t10_pi_ref_tag(rq); in t10_pi_type1_complete()
189 __rq_for_each_bio(bio, rq) { in t10_pi_type1_complete()
236 static void t10_pi_type3_prepare(struct request *rq) in t10_pi_type3_prepare() argument
[all …]

blk-pm.h
19 static inline void blk_pm_mark_last_busy(struct request *rq) in blk_pm_mark_last_busy() argument
21 if (rq->q->dev && !(rq->rq_flags & RQF_PM)) in blk_pm_mark_last_busy()
22 pm_runtime_mark_last_busy(rq->q->dev); in blk_pm_mark_last_busy()
30 static inline void blk_pm_mark_last_busy(struct request *rq) in blk_pm_mark_last_busy() argument

blk-rq-qos.c
41 void __rq_qos_done(struct rq_qos *rqos, struct request *rq) in __rq_qos_done() argument
45 rqos->ops->done(rqos, rq); in __rq_qos_done()
50 void __rq_qos_issue(struct rq_qos *rqos, struct request *rq) in __rq_qos_issue() argument
54 rqos->ops->issue(rqos, rq); in __rq_qos_issue()
59 void __rq_qos_requeue(struct rq_qos *rqos, struct request *rq) in __rq_qos_requeue() argument
63 rqos->ops->requeue(rqos, rq); in __rq_qos_requeue()
77 void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio) in __rq_qos_track() argument
81 rqos->ops->track(rqos, rq, bio); in __rq_qos_track()
86 void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio) in __rq_qos_merge() argument
90 rqos->ops->merge(rqos, rq, bio); in __rq_qos_merge()
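
The rq_qos wrappers in blk-rq-qos.h call into __rq_qos_*() only when q->rq_qos is set, and the __rq_qos_*() bodies forward the event to each registered policy (wbt, iocost, ...). A sketch of that chained, optional-hook dispatch, assuming policies are linked through a next pointer as in the kernel's struct rq_qos:

#include <stddef.h>

struct rqos_sketch;

struct rqos_ops_sketch {
    void (*done)(struct rqos_sketch *rqos, void *rq);   /* optional hook */
};

struct rqos_sketch {
    const struct rqos_ops_sketch *ops;
    struct rqos_sketch *next;                           /* next policy in the chain */
};

/* deliver the "request done" event to every policy that implements the hook */
static void rqos_done_all_sketch(struct rqos_sketch *rqos, void *rq)
{
    for (; rqos; rqos = rqos->next)
        if (rqos->ops->done)
            rqos->ops->done(rqos, rq);
}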

blk-stat.c
51 void blk_stat_add(struct request *rq, u64 now) in blk_stat_add() argument
53 struct request_queue *q = rq->q; in blk_stat_add()
59 value = (now >= rq->io_start_time_ns) ? now - rq->io_start_time_ns : 0; in blk_stat_add()
61 blk_throtl_stat_add(rq, value); in blk_stat_add()
69 bucket = cb->bucket_fn(rq); in blk_stat_add()
