
Searched refs:bio (Results 1 – 25 of 39) sorted by relevance

/block/
bio.c
65 unsigned int sz = sizeof(struct bio) + extra_size; in bio_find_or_create_slab()
234 void bio_uninit(struct bio *bio) in bio_uninit() argument
236 bio_disassociate_blkg(bio); in bio_uninit()
238 bio_crypt_free_ctx(bio); in bio_uninit()
240 if (bio_integrity(bio)) in bio_uninit()
241 bio_integrity_free(bio); in bio_uninit()
245 static void bio_free(struct bio *bio) in bio_free() argument
247 struct bio_set *bs = bio->bi_pool; in bio_free()
250 bio_uninit(bio); in bio_free()
253 bvec_free(&bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio)); in bio_free()
[all …]
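
The bio.c matches above cover teardown (bio_uninit()/bio_free()); a minimal, hedged sketch of the allocation and submission side that eventually reaches that path follows. The demo_* names, device, page and sector are placeholders, not taken from this tree.

/* Hedged sketch: the final bio_put() is what drives bio_free() -> bio_uninit(). */
#include <linux/bio.h>
#include <linux/blkdev.h>

static void demo_end_io(struct bio *bio)
{
	/* Completion callback: dropping the last reference frees the bio. */
	bio_put(bio);
}

static void demo_read_page(struct block_device *bdev, struct page *page)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);	/* room for one bvec */

	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_READ;
	bio->bi_iter.bi_sector = 0;			/* placeholder LBA */
	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio->bi_end_io = demo_end_io;
	submit_bio(bio);
}
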
blk-map.c
18 int blk_rq_append_bio(struct request *rq, struct bio **bio) in blk_rq_append_bio() argument
20 struct bio *orig_bio = *bio; in blk_rq_append_bio()
25 blk_queue_bounce(rq->q, bio); in blk_rq_append_bio()
27 bio_for_each_bvec(bv, *bio, iter) in blk_rq_append_bio()
30 if (!rq->bio) { in blk_rq_append_bio()
31 blk_rq_bio_prep(rq, *bio, nr_segs); in blk_rq_append_bio()
33 if (!ll_back_merge_fn(rq, *bio, nr_segs)) { in blk_rq_append_bio()
34 if (orig_bio != *bio) { in blk_rq_append_bio()
35 bio_put(*bio); in blk_rq_append_bio()
36 *bio = orig_bio; in blk_rq_append_bio()
[all …]
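
blk_rq_append_bio() above is normally reached through the blk_rq_map_*() helpers rather than called directly; a hedged sketch of that caller path follows (queue, buffer and the demo_* name are hypothetical, and real callers check every return value).

#include <linux/blkdev.h>
#include <linux/err.h>

static int demo_map_kern(struct request_queue *q, void *buf, unsigned int len)
{
	struct request *rq;
	int ret;

	rq = blk_get_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* Builds a bio over buf and appends it via blk_rq_append_bio(). */
	ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
	if (!ret)
		blk_execute_rq(q, NULL, rq, 0);	/* issue and wait */

	blk_put_request(rq);
	return ret;
}
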
blk-lib.c
13 struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp) in blk_next_bio() argument
15 struct bio *new = bio_alloc(gfp, nr_pages); in blk_next_bio()
17 if (bio) { in blk_next_bio()
18 bio_chain(bio, new); in blk_next_bio()
19 submit_bio(bio); in blk_next_bio()
27 struct bio **biop) in __blkdev_issue_discard()
30 struct bio *bio = *biop; in __blkdev_issue_discard() local
63 bio = blk_next_bio(bio, 0, gfp_mask); in __blkdev_issue_discard()
64 bio->bi_iter.bi_sector = sector; in __blkdev_issue_discard()
65 bio_set_dev(bio, bdev); in __blkdev_issue_discard()
[all …]
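
__blkdev_issue_discard() above is the worker behind the exported wrapper; a hedged usage sketch of that wrapper follows. The device and range are placeholders, and the five-argument prototype is the one used by kernels of this vintage.

#include <linux/blkdev.h>

static int demo_discard(struct block_device *bdev)
{
	sector_t start = 0;		/* placeholder start sector */
	sector_t nr_sects = 2048;	/* placeholder: 1 MiB in 512-byte sectors */

	/* Internally chains bios with blk_next_bio() as in the excerpt above. */
	return blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL, 0);
}
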
bounce.c
132 static void copy_to_high_bio_irq(struct bio *to, struct bio *from) in copy_to_high_bio_irq()
162 static void bounce_end_io(struct bio *bio, mempool_t *pool) in bounce_end_io() argument
164 struct bio *bio_orig = bio->bi_private; in bounce_end_io()
172 bio_for_each_segment_all(bvec, bio, iter_all) { in bounce_end_io()
181 bio_orig->bi_status = bio->bi_status; in bounce_end_io()
183 bio_put(bio); in bounce_end_io()
186 static void bounce_end_io_write(struct bio *bio) in bounce_end_io_write() argument
188 bounce_end_io(bio, &page_pool); in bounce_end_io_write()
191 static void bounce_end_io_write_isa(struct bio *bio) in bounce_end_io_write_isa() argument
194 bounce_end_io(bio, &isa_page_pool); in bounce_end_io_write_isa()
[all …]
blk-merge.c
16 struct request *prev_rq, struct bio *prev, struct bio *next) in bio_will_gap()
29 bio_get_first_bvec(prev_rq->bio, &pb); in bio_will_gap()
51 static inline bool req_gap_back_merge(struct request *req, struct bio *bio) in req_gap_back_merge() argument
53 return bio_will_gap(req->q, req, req->biotail, bio); in req_gap_back_merge()
56 static inline bool req_gap_front_merge(struct request *req, struct bio *bio) in req_gap_front_merge() argument
58 return bio_will_gap(req->q, NULL, bio, req->bio); in req_gap_front_merge()
61 static struct bio *blk_bio_discard_split(struct request_queue *q, in blk_bio_discard_split()
62 struct bio *bio, in blk_bio_discard_split() argument
85 if (bio_sectors(bio) <= max_discard_sectors) in blk_bio_discard_split()
96 tmp = bio->bi_iter.bi_sector + split_sectors - alignment; in blk_bio_discard_split()
[all …]
blk-core.c
233 static void req_bio_endio(struct request *rq, struct bio *bio, in req_bio_endio() argument
237 bio->bi_status = error; in req_bio_endio()
240 bio_set_flag(bio, BIO_QUIET); in req_bio_endio()
242 bio_advance(bio, nbytes); in req_bio_endio()
245 if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ)) in req_bio_endio()
246 bio_endio(bio); in req_bio_endio()
259 rq->bio, rq->biotail, blk_rq_bytes(rq)); in blk_dump_rq_flags()
603 bool bio_attempt_back_merge(struct request *req, struct bio *bio, in bio_attempt_back_merge() argument
606 const int ff = bio->bi_opf & REQ_FAILFAST_MASK; in bio_attempt_back_merge()
608 if (!ll_back_merge_fn(req, bio, nr_segs)) in bio_attempt_back_merge()
[all …]
bio-integrity.c
37 struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio, in bio_integrity_alloc() argument
42 struct bio_set *bs = bio->bi_pool; in bio_integrity_alloc()
72 bip->bip_bio = bio; in bio_integrity_alloc()
73 bio->bi_integrity = bip; in bio_integrity_alloc()
74 bio->bi_opf |= REQ_INTEGRITY; in bio_integrity_alloc()
90 void bio_integrity_free(struct bio *bio) in bio_integrity_free() argument
92 struct bio_integrity_payload *bip = bio_integrity(bio); in bio_integrity_free()
93 struct bio_set *bs = bio->bi_pool; in bio_integrity_free()
107 bio->bi_integrity = NULL; in bio_integrity_free()
108 bio->bi_opf &= ~REQ_INTEGRITY; in bio_integrity_free()
[all …]
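
bio_integrity_alloc()/bio_integrity_free() above manage the integrity payload attached to a bio; a hedged sketch of the allocation side follows (the protection-information page, length and the demo_* name are hypothetical).

#include <linux/bio.h>
#include <linux/err.h>

static int demo_attach_integrity(struct bio *bio, struct page *pi_page,
				 unsigned int pi_len)
{
	struct bio_integrity_payload *bip;

	bip = bio_integrity_alloc(bio, GFP_NOIO, 1);	/* one integrity bvec */
	if (IS_ERR(bip))
		return PTR_ERR(bip);

	/* Returns the number of bytes added; 0 means the vec did not fit. */
	if (bio_integrity_add_page(bio, pi_page, pi_len, 0) != pi_len)
		return -ENOMEM;

	return 0;
}
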
blk.h
110 static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio, in blk_rq_bio_prep() argument
114 rq->__data_len = bio->bi_iter.bi_size; in blk_rq_bio_prep()
115 rq->bio = rq->biotail = bio; in blk_rq_bio_prep()
116 rq->ioprio = bio_prio(bio); in blk_rq_bio_prep()
118 if (bio->bi_disk) in blk_rq_bio_prep()
119 rq->rq_disk = bio->bi_disk; in blk_rq_bio_prep()
124 bool __bio_integrity_endio(struct bio *);
125 void bio_integrity_free(struct bio *bio);
126 static inline bool bio_integrity_endio(struct bio *bio) in bio_integrity_endio() argument
128 if (bio_integrity(bio)) in bio_integrity_endio()
[all …]
blk-crypto-fallback.c
68 struct bio *bio; member
153 static void blk_crypto_encrypt_endio(struct bio *enc_bio) in blk_crypto_encrypt_endio()
155 struct bio *src_bio = enc_bio->bi_private; in blk_crypto_encrypt_endio()
168 static struct bio *blk_crypto_clone_bio(struct bio *bio_src) in blk_crypto_clone_bio()
172 struct bio *bio; in blk_crypto_clone_bio() local
174 bio = bio_alloc_bioset(GFP_NOIO, bio_segments(bio_src), NULL); in blk_crypto_clone_bio()
175 if (!bio) in blk_crypto_clone_bio()
177 bio->bi_disk = bio_src->bi_disk; in blk_crypto_clone_bio()
178 bio->bi_opf = bio_src->bi_opf; in blk_crypto_clone_bio()
179 bio->bi_ioprio = bio_src->bi_ioprio; in blk_crypto_clone_bio()
[all …]
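
blk_crypto_clone_bio() above open-codes a clone because the fallback needs its own bvecs; elsewhere a clone is usually taken with the generic helper, as in this hedged sketch (fs_bio_set is used only as a placeholder bio_set).

#include <linux/bio.h>

static struct bio *demo_clone(struct bio *src)
{
	/* Shares the source's bvec table; only the iterator and flags are copied. */
	return bio_clone_fast(src, GFP_NOIO, &fs_bio_set);
}
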
blk-rq-qos.h
37 void (*throttle)(struct rq_qos *, struct bio *);
38 void (*track)(struct rq_qos *, struct request *, struct bio *);
39 void (*merge)(struct rq_qos *, struct request *, struct bio *);
43 void (*done_bio)(struct rq_qos *, struct bio *);
44 void (*cleanup)(struct rq_qos *, struct bio *);
134 void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
138 void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio);
139 void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
140 void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio);
141 void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio);
[all …]
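
The rq_qos hooks above are what policies such as blk-wbt, blk-iolatency and blk-iocost fill in; a hedged skeleton using only the callbacks visible in this header follows (the demo_* names are hypothetical, and registration via rq_qos_add() plus the policy's private state are left out). It would live inside block/ since the header is block-layer internal.

#include <linux/blkdev.h>
#include "blk-rq-qos.h"		/* block-layer internal header */

static void demo_throttle(struct rq_qos *rqos, struct bio *bio)
{
	/* Account or delay the bio before it is issued. */
}

static void demo_done_bio(struct rq_qos *rqos, struct bio *bio)
{
	/* Completion-side accounting for a tracked bio. */
}

static struct rq_qos_ops demo_rqos_ops = {
	.throttle	= demo_throttle,
	.done_bio	= demo_done_bio,
};
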
blk-crypto.c
39 static int bio_crypt_check_alignment(struct bio *bio) in bio_crypt_check_alignment() argument
42 bio->bi_crypt_context->bc_key->data_unit_size; in bio_crypt_check_alignment()
46 bio_for_each_segment(bv, bio, iter) { in bio_crypt_check_alignment()
72 int blk_crypto_submit_bio(struct bio **bio_ptr) in blk_crypto_submit_bio()
74 struct bio *bio = *bio_ptr; in blk_crypto_submit_bio() local
76 struct bio_crypt_ctx *bc = bio->bi_crypt_context; in blk_crypto_submit_bio()
79 if (!bc || !bio_has_data(bio)) in blk_crypto_submit_bio()
92 err = bio_crypt_check_alignment(bio); in blk_crypto_submit_bio()
94 bio->bi_status = BLK_STS_IOERR; in blk_crypto_submit_bio()
98 q = bio->bi_disk->queue; in blk_crypto_submit_bio()
[all …]
bio-crypt-ctx.c
54 void bio_crypt_free_ctx(struct bio *bio) in bio_crypt_free_ctx() argument
56 mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool); in bio_crypt_free_ctx()
57 bio->bi_crypt_context = NULL; in bio_crypt_free_ctx()
60 void bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask) in bio_crypt_clone()
84 struct bio *bio = rq->bio; in bio_crypt_should_process() local
86 if (!bio || !bio->bi_crypt_context) in bio_crypt_should_process()
89 return rq->q->ksm == bio->bi_crypt_context->bc_ksm; in bio_crypt_should_process()
97 bool bio_crypt_ctx_compatible(struct bio *b_1, struct bio *b_2) in bio_crypt_ctx_compatible()
112 bool bio_crypt_ctx_mergeable(struct bio *b_1, unsigned int b1_bytes, in bio_crypt_ctx_mergeable()
113 struct bio *b_2) in bio_crypt_ctx_mergeable()
blk-mq-sched.h
14 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
16 bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
35 blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio, in blk_mq_sched_bio_merge() argument
38 if (blk_queue_nomerges(q) || !bio_mergeable(bio)) in blk_mq_sched_bio_merge()
41 return __blk_mq_sched_bio_merge(q, bio, nr_segs); in blk_mq_sched_bio_merge()
46 struct bio *bio) in blk_mq_sched_allow_merge() argument
51 return e->type->ops.allow_merge(q, rq, bio); in blk_mq_sched_allow_merge()
blk-throttle.c
381 static inline unsigned int throtl_bio_data_size(struct bio *bio) in throtl_bio_data_size() argument
384 if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) in throtl_bio_data_size()
386 return bio->bi_iter.bi_size; in throtl_bio_data_size()
406 static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn, in throtl_qnode_add_bio() argument
409 bio_list_add(&qn->bios, bio); in throtl_qnode_add_bio()
420 static struct bio *throtl_peek_queued(struct list_head *queued) in throtl_peek_queued()
423 struct bio *bio; in throtl_peek_queued() local
428 bio = bio_list_peek(&qn->bios); in throtl_peek_queued()
429 WARN_ON_ONCE(!bio); in throtl_peek_queued()
430 return bio; in throtl_peek_queued()
[all …]
blk-mq-sched.c
226 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio, in blk_mq_sched_try_merge() argument
231 switch (elv_merge(q, &rq, bio)) { in blk_mq_sched_try_merge()
233 if (!blk_mq_sched_allow_merge(q, rq, bio)) in blk_mq_sched_try_merge()
235 if (!bio_attempt_back_merge(rq, bio, nr_segs)) in blk_mq_sched_try_merge()
242 if (!blk_mq_sched_allow_merge(q, rq, bio)) in blk_mq_sched_try_merge()
244 if (!bio_attempt_front_merge(rq, bio, nr_segs)) in blk_mq_sched_try_merge()
251 return bio_attempt_discard_merge(q, rq, bio); in blk_mq_sched_try_merge()
263 struct bio *bio, unsigned int nr_segs) in blk_mq_bio_list_merge() argument
274 if (!blk_rq_merge_ok(rq, bio)) in blk_mq_bio_list_merge()
277 switch (blk_try_merge(rq, bio)) { in blk_mq_bio_list_merge()
[all …]
blk-rq-qos.c
32 void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio) in __rq_qos_cleanup() argument
36 rqos->ops->cleanup(rqos, bio); in __rq_qos_cleanup()
68 void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio) in __rq_qos_throttle() argument
72 rqos->ops->throttle(rqos, bio); in __rq_qos_throttle()
77 void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio) in __rq_qos_track() argument
81 rqos->ops->track(rqos, rq, bio); in __rq_qos_track()
86 void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio) in __rq_qos_merge() argument
90 rqos->ops->merge(rqos, rq, bio); in __rq_qos_merge()
95 void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio) in __rq_qos_done_bio() argument
99 rqos->ops->done_bio(rqos, bio); in __rq_qos_done_bio()
blk-integrity.c
27 int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio) in blk_rq_count_integrity_sg() argument
35 bio_for_each_integrity_vec(iv, bio, iter) { in blk_rq_count_integrity_sg()
68 int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio, in blk_rq_map_integrity_sg() argument
77 bio_for_each_integrity_vec(iv, bio, iter) { in blk_rq_map_integrity_sg()
173 if (bio_integrity(req->bio)->bip_flags != in blk_integrity_merge_rq()
174 bio_integrity(next->bio)->bip_flags) in blk_integrity_merge_rq()
181 if (integrity_req_gap_back_merge(req, next->bio)) in blk_integrity_merge_rq()
189 struct bio *bio) in blk_integrity_merge_bio() argument
192 struct bio *next = bio->bi_next; in blk_integrity_merge_bio()
194 if (blk_integrity_rq(req) == 0 && bio_integrity(bio) == NULL) in blk_integrity_merge_bio()
[all …]
blk-crypto-internal.h
22 int blk_crypto_fallback_submit_bio(struct bio **bio_ptr);
24 bool blk_crypto_queue_decrypt_bio(struct bio *bio);
37 static inline int blk_crypto_fallback_submit_bio(struct bio **bio_ptr) in blk_crypto_fallback_submit_bio()
44 static inline bool blk_crypto_queue_decrypt_bio(struct bio *bio) in blk_crypto_queue_decrypt_bio() argument
blk-wbt.c
529 static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio) in wbt_should_throttle() argument
531 switch (bio_op(bio)) { in wbt_should_throttle()
536 if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) == in wbt_should_throttle()
547 static enum wbt_flags bio_to_wbt_flags(struct rq_wb *rwb, struct bio *bio) in bio_to_wbt_flags() argument
554 if (bio_op(bio) == REQ_OP_READ) { in bio_to_wbt_flags()
556 } else if (wbt_should_throttle(rwb, bio)) { in bio_to_wbt_flags()
559 if (bio_op(bio) == REQ_OP_DISCARD) in bio_to_wbt_flags()
566 static void wbt_cleanup(struct rq_qos *rqos, struct bio *bio) in wbt_cleanup() argument
569 enum wbt_flags flags = bio_to_wbt_flags(rwb, bio); in wbt_cleanup()
579 static void wbt_wait(struct rq_qos *rqos, struct bio *bio) in wbt_wait() argument
[all …]
blk-flush.c
128 rq->bio = rq->biotail; in blk_flush_restore_request()
393 BUG_ON(rq->bio != rq->biotail); /*assumes zero or single bio rq */ in blk_insert_flush()
437 struct bio *bio; in blkdev_issue_flush() local
456 bio = bio_alloc(gfp_mask, 0); in blkdev_issue_flush()
457 bio_set_dev(bio, bdev); in blkdev_issue_flush()
458 bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; in blkdev_issue_flush()
460 ret = submit_bio_wait(bio); in blkdev_issue_flush()
468 *error_sector = bio->bi_iter.bi_sector; in blkdev_issue_flush()
470 bio_put(bio); in blkdev_issue_flush()
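
The blkdev_issue_flush() matches above show the whole pattern; a hedged, stripped-down sketch of it (no error_sector reporting, placeholder device) follows.

#include <linux/bio.h>
#include <linux/blkdev.h>

static int demo_flush(struct block_device *bdev)
{
	struct bio *bio;
	int ret;

	bio = bio_alloc(GFP_KERNEL, 0);		/* no data pages, just a flush */
	bio_set_dev(bio, bdev);
	bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

	ret = submit_bio_wait(bio);
	bio_put(bio);
	return ret;
}
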
blk-zoned.c
164 struct bio *bio = NULL; in blkdev_zone_mgmt() local
188 bio = blk_next_bio(bio, 0, gfp_mask); in blkdev_zone_mgmt()
189 bio_set_dev(bio, bdev); in blkdev_zone_mgmt()
197 bio->bi_opf = REQ_OP_ZONE_RESET_ALL; in blkdev_zone_mgmt()
201 bio->bi_opf = op; in blkdev_zone_mgmt()
202 bio->bi_iter.bi_sector = sector; in blkdev_zone_mgmt()
209 ret = submit_bio_wait(bio); in blkdev_zone_mgmt()
210 bio_put(bio); in blkdev_zone_mgmt()
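
blkdev_zone_mgmt() above builds the chained zone-op bios itself; a hedged caller sketch follows. The prototype is assumed from mainline kernels of this vintage, and zone_start is a placeholder that must be zone-aligned.

#include <linux/blkdev.h>

static int demo_zone_reset(struct block_device *bdev, sector_t zone_start)
{
	return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET, zone_start,
				bdev_zone_sectors(bdev), GFP_KERNEL);
}
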
t10-pi.c
136 struct bio *bio; in t10_pi_type1_prepare() local
138 __rq_for_each_bio(bio, rq) { in t10_pi_type1_prepare()
139 struct bio_integrity_payload *bip = bio_integrity(bio); in t10_pi_type1_prepare()
188 struct bio *bio; in t10_pi_type1_complete() local
190 __rq_for_each_bio(bio, rq) { in t10_pi_type1_complete()
191 struct bio_integrity_payload *bip = bio_integrity(bio); in t10_pi_type1_complete()
elevator.c
60 static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio) in elv_iosched_allow_bio_merge() argument
66 return e->type->ops.allow_merge(q, rq, bio); in elv_iosched_allow_bio_merge()
74 bool elv_bio_merge_ok(struct request *rq, struct bio *bio) in elv_bio_merge_ok() argument
76 if (!blk_rq_merge_ok(rq, bio)) in elv_bio_merge_ok()
79 if (!elv_iosched_allow_bio_merge(rq, bio)) in elv_bio_merge_ok()
305 struct bio *bio) in elv_merge() argument
316 if (blk_queue_nomerges(q) || !bio_mergeable(bio)) in elv_merge()
322 if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) { in elv_merge()
323 enum elv_merge ret = blk_try_merge(q->last_merge, bio); in elv_merge()
337 __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector); in elv_merge()
[all …]
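
elv_iosched_allow_bio_merge() above simply defers to the scheduler's allow_merge hook; a hedged skeleton of such a hook follows (struct layout assumed from mainline elevator.h, demo_* names hypothetical, and a real scheduler fills in many more ops before calling elv_register()).

#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/module.h>

static bool demo_allow_merge(struct request_queue *q, struct request *rq,
			     struct bio *bio)
{
	/* Veto merges the scheduler cannot account for; allow everything here. */
	return true;
}

static struct elevator_type demo_sched = {
	.ops = {
		.allow_merge	= demo_allow_merge,
	},
	.elevator_name	= "demo",
	.elevator_owner	= THIS_MODULE,
};
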
blk-iocost.c
518 struct bio *bio; member
672 static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio, u64 cost) in iocg_commit_bio() argument
674 bio->bi_iocost_cost = cost; in iocg_commit_bio()
1128 iocg_commit_bio(ctx->iocg, wait->bio, cost); in iocg_wake_fn()
1630 static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg, in calc_vtime_cost_builtin() argument
1635 u64 pages = max_t(u64, bio_sectors(bio) >> IOC_SECT_TO_PAGE_SHIFT, 1); in calc_vtime_cost_builtin()
1639 switch (bio_op(bio)) { in calc_vtime_cost_builtin()
1655 seek_pages = abs(bio->bi_iter.bi_sector - iocg->cursor); in calc_vtime_cost_builtin()
1671 static u64 calc_vtime_cost(struct bio *bio, struct ioc_gq *iocg, bool is_merge) in calc_vtime_cost() argument
1675 calc_vtime_cost_builtin(bio, iocg, is_merge, &cost); in calc_vtime_cost()
[all …]
blk-iolatency.c
460 static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio) in blkcg_iolatency_throttle() argument
463 struct blkcg_gq *blkg = bio->bi_blkg; in blkcg_iolatency_throttle()
464 bool issue_as_root = bio_issue_as_root_blkg(bio); in blkcg_iolatency_throttle()
478 (bio->bi_opf & REQ_SWAP) == REQ_SWAP); in blkcg_iolatency_throttle()
588 static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio) in blkcg_iolatency_done_bio() argument
595 bool issue_as_root = bio_issue_as_root_blkg(bio); in blkcg_iolatency_done_bio()
599 blkg = bio->bi_blkg; in blkcg_iolatency_done_bio()
600 if (!blkg || !bio_flagged(bio, BIO_TRACKED)) in blkcg_iolatency_done_bio()
603 iolat = blkg_to_lat(bio->bi_blkg); in blkcg_iolatency_done_bio()
625 if (iolat->min_lat_nsec && bio->bi_status != BLK_STS_AGAIN) { in blkcg_iolatency_done_bio()
[all …]
