
Searched refs:bio (Results 1 – 25 of 37) sorted by relevance


/block/
bio.c
109 return bs->front_pad + sizeof(struct bio) + bs->back_pad; in bs_bio_slab_size()
208 void bio_uninit(struct bio *bio) in bio_uninit() argument
211 if (bio->bi_blkg) { in bio_uninit()
212 blkg_put(bio->bi_blkg); in bio_uninit()
213 bio->bi_blkg = NULL; in bio_uninit()
216 if (bio_integrity(bio)) in bio_uninit()
217 bio_integrity_free(bio); in bio_uninit()
219 bio_crypt_free_ctx(bio); in bio_uninit()
223 static void bio_free(struct bio *bio) in bio_free() argument
225 struct bio_set *bs = bio->bi_pool; in bio_free()
[all …]
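
The bio.c hits above are the bio lifecycle: a bio allocated from a bio_set records the set in bi_pool, and the final bio_put() funnels through bio_uninit() and bio_free() back into that set. A minimal sketch of the round trip, assuming the pre-5.18 bioset_init()/bio_alloc_bioset() signatures used here (my_bioset and both function names are illustrative):

#include <linux/bio.h>

static struct bio_set my_bioset;        /* hypothetical private pool */

static int my_pool_setup(void)
{
        /* 16 bios held in reserve, no front padding, with a bvec pool. */
        return bioset_init(&my_bioset, 16, 0, BIOSET_NEED_BVECS);
}

static int my_pool_read(struct block_device *bdev, struct page *page)
{
        /* GFP_NOIO may block and dip into the mempool, so this cannot fail. */
        struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, &my_bioset);
        int ret;

        bio_set_dev(bio, bdev);
        bio->bi_opf = REQ_OP_READ;
        bio_add_page(bio, page, PAGE_SIZE, 0);

        ret = submit_bio_wait(bio);
        bio_put(bio);   /* final ref: bio_uninit() + bio_free(), back to my_bioset */
        return ret;
}

Teardown is bioset_exit(&my_bioset) once all bios from the set are back.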
blk-merge.c
18 struct request *prev_rq, struct bio *prev, struct bio *next) in bio_will_gap()
31 bio_get_first_bvec(prev_rq->bio, &pb); in bio_will_gap()
53 static inline bool req_gap_back_merge(struct request *req, struct bio *bio) in req_gap_back_merge() argument
55 return bio_will_gap(req->q, req, req->biotail, bio); in req_gap_back_merge()
58 static inline bool req_gap_front_merge(struct request *req, struct bio *bio) in req_gap_front_merge() argument
60 return bio_will_gap(req->q, NULL, bio, req->bio); in req_gap_front_merge()
63 static struct bio *blk_bio_discard_split(struct request_queue *q, in blk_bio_discard_split()
64 struct bio *bio, in blk_bio_discard_split() argument
87 if (bio_sectors(bio) <= max_discard_sectors) in blk_bio_discard_split()
98 tmp = bio->bi_iter.bi_sector + split_sectors - alignment; in blk_bio_discard_split()
[all …]
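
bio_will_gap() and the req_gap_{back,front}_merge() helpers above enforce the queue's virt boundary mask: two data segments may share a request only if the first ends, and the second starts, on that boundary. A restatement of the rule as a standalone predicate (the function name is illustrative; this mirrors the kernel's gap test rather than being it):

#include <linux/bvec.h>
#include <linux/blkdev.h>

/* Illustrative only: true if placing @next after @prev would create a gap
 * the device's virt boundary cannot express.
 */
static bool segments_would_gap(struct request_queue *q,
                               struct bio_vec *prev, struct bio_vec *next)
{
        unsigned long mask = queue_virt_boundary(q);

        if (!mask)
                return false;   /* no virt boundary constraint on this queue */

        /* @prev must end on the boundary and @next must start on it. */
        return ((prev->bv_offset + prev->bv_len) & mask) ||
               (next->bv_offset & mask);
}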
blk-map.c
46 static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter) in bio_copy_from_iter() argument
51 bio_for_each_segment_all(bvec, bio, iter_all) { in bio_copy_from_iter()
77 static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter) in bio_copy_to_iter() argument
82 bio_for_each_segment_all(bvec, bio, iter_all) { in bio_copy_to_iter()
107 static int bio_uncopy_user(struct bio *bio) in bio_uncopy_user() argument
109 struct bio_map_data *bmd = bio->bi_private; in bio_uncopy_user()
120 else if (bio_data_dir(bio) == READ) in bio_uncopy_user()
121 ret = bio_copy_to_iter(bio, bmd->iter); in bio_uncopy_user()
123 bio_free_pages(bio); in bio_uncopy_user()
134 struct bio *bio; in bio_copy_user_iov() local
[all …]
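
blk-map.c bounces user buffers through kernel pages: bio_copy_from_iter() fills the bio's pages from the iov_iter before a write, bio_copy_to_iter() copies back after a read, and bio_uncopy_user() picks the direction. The loop shape, sketched (copy_iter_into_bio is a made-up name; the bvec walk and copy_page_from_iter() are the real APIs):

#include <linux/bio.h>
#include <linux/uio.h>

/* Sketch: copy user data described by @iter into @bio's pages. */
static int copy_iter_into_bio(struct bio *bio, struct iov_iter *iter)
{
        struct bio_vec *bvec;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bvec, bio, iter_all) {
                size_t copied = copy_page_from_iter(bvec->bv_page,
                                                    bvec->bv_offset,
                                                    bvec->bv_len, iter);
                if (copied != bvec->bv_len)
                        return -EFAULT; /* short copy: user buffer exhausted */
        }
        return 0;
}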
blk-lib.c
13 struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp) in blk_next_bio() argument
15 struct bio *new = bio_alloc(gfp, nr_pages); in blk_next_bio()
17 if (bio) { in blk_next_bio()
18 bio_chain(bio, new); in blk_next_bio()
19 submit_bio(bio); in blk_next_bio()
28 struct bio **biop) in __blkdev_issue_discard()
31 struct bio *bio = *biop; in __blkdev_issue_discard() local
98 bio = blk_next_bio(bio, 0, gfp_mask); in __blkdev_issue_discard()
99 bio->bi_iter.bi_sector = sector; in __blkdev_issue_discard()
100 bio_set_dev(bio, bdev); in __blkdev_issue_discard()
[all …]
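
blk_next_bio() is the chain-and-continue idiom used throughout blk-lib.c and blk-zoned.c: when one bio cannot cover the whole range, chain the current bio to a fresh one, submit it, and keep filling the new tail; waiting on the last bio then waits for the entire chain. A sketch of the same pattern open-coded with bio_chain() (submit_chained, the empty-flush op, and the stride are illustrative; a real caller fills in op-specific fields per bio):

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Sketch: submit @nr_bios chained bios and wait once for the whole chain.
 * Assumes nr_bios >= 1.
 */
static int submit_chained(struct block_device *bdev, sector_t sector,
                          unsigned int nr_bios)
{
        struct bio *bio = NULL;
        unsigned int i;

        for (i = 0; i < nr_bios; i++) {
                struct bio *new = bio_alloc(GFP_KERNEL, 0);

                if (bio) {
                        bio_chain(bio, new);    /* @new completes after @bio */
                        submit_bio(bio);
                }
                bio = new;
                bio_set_dev(bio, bdev);
                /* empty flush, just to keep the sketch self-contained */
                bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
                bio->bi_iter.bi_sector = sector;
                sector += 8;    /* hypothetical 4 KiB stride */
        }
        /* The tail of the chain completes last; wait on it alone. */
        return submit_bio_wait(bio);
}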
bounce.c
75 static void copy_to_high_bio_irq(struct bio *to, struct bio *from) in copy_to_high_bio_irq()
101 static void bounce_end_io(struct bio *bio) in bounce_end_io() argument
103 struct bio *bio_orig = bio->bi_private; in bounce_end_io()
111 bio_for_each_segment_all(bvec, bio, iter_all) { in bounce_end_io()
120 bio_orig->bi_status = bio->bi_status; in bounce_end_io()
122 bio_put(bio); in bounce_end_io()
125 static void bounce_end_io_write(struct bio *bio) in bounce_end_io_write() argument
127 bounce_end_io(bio); in bounce_end_io_write()
130 static void bounce_end_io_read(struct bio *bio) in bounce_end_io_read() argument
132 struct bio *bio_orig = bio->bi_private; in bounce_end_io_read()
[all …]
blk-core.c
243 static void req_bio_endio(struct request *rq, struct bio *bio, in req_bio_endio() argument
247 bio->bi_status = error; in req_bio_endio()
250 bio_set_flag(bio, BIO_QUIET); in req_bio_endio()
252 bio_advance(bio, nbytes); in req_bio_endio()
259 if (bio->bi_iter.bi_size) in req_bio_endio()
260 bio->bi_status = BLK_STS_IOERR; in req_bio_endio()
262 bio->bi_iter.bi_sector = rq->__sector; in req_bio_endio()
266 if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ)) in req_bio_endio()
267 bio_endio(bio); in req_bio_endio()
280 rq->bio, rq->biotail, blk_rq_bytes(rq)); in blk_dump_rq_flags()
[all …]
bio-integrity.c
48 struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio, in bio_integrity_alloc() argument
53 struct bio_set *bs = bio->bi_pool; in bio_integrity_alloc()
56 if (WARN_ON_ONCE(bio_has_crypt_ctx(bio))) in bio_integrity_alloc()
83 bip->bip_bio = bio; in bio_integrity_alloc()
84 bio->bi_integrity = bip; in bio_integrity_alloc()
85 bio->bi_opf |= REQ_INTEGRITY; in bio_integrity_alloc()
101 void bio_integrity_free(struct bio *bio) in bio_integrity_free() argument
103 struct bio_integrity_payload *bip = bio_integrity(bio); in bio_integrity_free()
104 struct bio_set *bs = bio->bi_pool; in bio_integrity_free()
110 bio->bi_integrity = NULL; in bio_integrity_free()
[all …]
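
bio_integrity_alloc() hangs a bio_integrity_payload off the bio and flags it REQ_INTEGRITY; the metadata pages themselves are attached with bio_integrity_add_page(), and bio_integrity_free() (seen from bio.c's bio_uninit() above) releases it all. A sketch of a caller attaching one page of protection information (attach_pi is a made-up name):

#include <linux/bio.h>
#include <linux/err.h>

/* Sketch: attach one page of protection information to @bio. */
static int attach_pi(struct bio *bio, struct page *meta_page,
                     unsigned int meta_len)
{
        struct bio_integrity_payload *bip;

        bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
        if (IS_ERR(bip))
                return PTR_ERR(bip);

        /* Freed later via bio_uninit() -> bio_integrity_free(). */
        if (bio_integrity_add_page(bio, meta_page, meta_len, 0) != meta_len)
                return -ENOMEM;
        return 0;
}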
blk-crypto-internal.h
32 bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio);
38 struct bio *bio) in bio_crypt_ctx_back_mergeable() argument
41 bio->bi_crypt_context); in bio_crypt_ctx_back_mergeable()
45 struct bio *bio) in bio_crypt_ctx_front_mergeable() argument
47 return bio_crypt_ctx_mergeable(bio->bi_crypt_context, in bio_crypt_ctx_front_mergeable()
48 bio->bi_iter.bi_size, req->crypt_ctx); in bio_crypt_ctx_front_mergeable()
96 struct bio *bio) in bio_crypt_rq_ctx_compatible() argument
102 struct bio *bio) in bio_crypt_ctx_front_mergeable() argument
108 struct bio *bio) in bio_crypt_ctx_back_mergeable() argument
133 void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
[all …]
blk.h
138 bool __bio_integrity_endio(struct bio *);
139 void bio_integrity_free(struct bio *bio);
140 static inline bool bio_integrity_endio(struct bio *bio) in bio_integrity_endio() argument
142 if (bio_integrity(bio)) in bio_integrity_endio()
143 return __bio_integrity_endio(bio); in bio_integrity_endio()
150 struct bio *);
153 struct bio *next) in integrity_req_gap_back_merge()
155 struct bio_integrity_payload *bip = bio_integrity(req->bio); in integrity_req_gap_back_merge()
163 struct bio *bio) in integrity_req_gap_front_merge() argument
165 struct bio_integrity_payload *bip = bio_integrity(bio); in integrity_req_gap_front_merge()
[all …]
blk-rq-qos.h
39 void (*throttle)(struct rq_qos *, struct bio *);
40 void (*track)(struct rq_qos *, struct request *, struct bio *);
41 void (*merge)(struct rq_qos *, struct request *, struct bio *);
45 void (*done_bio)(struct rq_qos *, struct bio *);
46 void (*cleanup)(struct rq_qos *, struct bio *);
155 void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
159 void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio);
160 void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
161 void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio);
162 void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio);
[all …]
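
These blk-rq-qos.h hooks are the policy interface behind blk-wbt.c, blk-iolatency.c and blk-ioprio.c below: a policy embeds a struct rq_qos, fills an rq_qos_ops table, and the __rq_qos_*() walkers fan each event out to every policy on the queue. A minimal skeleton under that assumption (blk-rq-qos.h is block-internal, so this only builds inside block/; all demo_* names are hypothetical):

#include "blk-rq-qos.h" /* block/ internal header */

static void demo_done_bio(struct rq_qos *rqos, struct bio *bio)
{
        /* per-bio completion accounting would live here */
}

static struct rq_qos_ops demo_ops = {
        .done_bio = demo_done_bio,      /* unset hooks are simply skipped */
};

static struct rq_qos demo_rqos;

static void demo_register(struct request_queue *q)
{
        demo_rqos.q = q;
        demo_rqos.ops = &demo_ops;
        /* real policies also claim an enum rq_qos_id of their own */
        rq_qos_add(q, &demo_rqos);      /* teardown: rq_qos_del(q, &demo_rqos) */
}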
fops.c
46 static void blkdev_bio_end_io_simple(struct bio *bio) in blkdev_bio_end_io_simple() argument
48 struct task_struct *waiter = bio->bi_private; in blkdev_bio_end_io_simple()
50 WRITE_ONCE(bio->bi_private, NULL); in blkdev_bio_end_io_simple()
62 struct bio bio; in __blkdev_direct_IO_simple() local
79 bio_init(&bio, vecs, nr_pages); in __blkdev_direct_IO_simple()
80 bio_set_dev(&bio, bdev); in __blkdev_direct_IO_simple()
81 bio.bi_iter.bi_sector = pos >> 9; in __blkdev_direct_IO_simple()
82 bio.bi_write_hint = iocb->ki_hint; in __blkdev_direct_IO_simple()
83 bio.bi_private = current; in __blkdev_direct_IO_simple()
84 bio.bi_end_io = blkdev_bio_end_io_simple; in __blkdev_direct_IO_simple()
[all …]
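
fops.c's __blkdev_direct_IO_simple() above shows the cheapest possible I/O: a bio on the stack, bio_init()'ed over a caller-supplied bio_vec array, with the sleeping task parked in bi_private and woken from bi_end_io (the pos >> 9 in the hit converts a byte offset to 512-byte sectors). When the caller can simply block, the same on-stack pattern collapses to submit_bio_wait(); a sketch (read_page_sync is illustrative):

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Sketch: synchronously read one page at @sector via an on-stack bio. */
static int read_page_sync(struct block_device *bdev, sector_t sector,
                          struct page *page)
{
        struct bio_vec bvec;
        struct bio bio;

        bio_init(&bio, &bvec, 1);
        bio_set_dev(&bio, bdev);
        bio.bi_opf = REQ_OP_READ;
        bio.bi_iter.bi_sector = sector;
        bio_add_page(&bio, page, PAGE_SIZE, 0);

        return submit_bio_wait(&bio);   /* stack bio: no bio_put() needed */
}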
blk-crypto-fallback.c
51 struct bio *bio; member
143 static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio) in blk_crypto_fallback_encrypt_endio()
145 struct bio *src_bio = enc_bio->bi_private; in blk_crypto_fallback_encrypt_endio()
158 static struct bio *blk_crypto_fallback_clone_bio(struct bio *bio_src) in blk_crypto_fallback_clone_bio()
162 struct bio *bio; in blk_crypto_fallback_clone_bio() local
164 bio = bio_kmalloc(GFP_NOIO, bio_segments(bio_src)); in blk_crypto_fallback_clone_bio()
165 if (!bio) in blk_crypto_fallback_clone_bio()
167 bio->bi_bdev = bio_src->bi_bdev; in blk_crypto_fallback_clone_bio()
169 bio_set_flag(bio, BIO_REMAPPED); in blk_crypto_fallback_clone_bio()
170 bio->bi_opf = bio_src->bi_opf; in blk_crypto_fallback_clone_bio()
[all …]
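
blk_crypto_fallback_clone_bio() above deep-copies the bvec table because encryption must write into fresh pages. When the payload pages can be shared, the generic helper of this era is bio_clone_fast(), which clones only the bio head; a hedged sketch (shallow_clone is a made-up wrapper):

#include <linux/bio.h>

/* Sketch: clone that shares @src's data pages instead of copying them. */
static struct bio *shallow_clone(struct bio *src, struct bio_set *bs)
{
        /* bi_io_vec still points into @src, so @src must outlive the clone;
         * @bs must be a valid bio_set (e.g. a pool like my_bioset earlier). */
        return bio_clone_fast(src, GFP_NOIO, bs);
}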
blk-crypto.c
101 void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key, in bio_crypt_set_ctx() argument
117 bio->bi_crypt_context = bc; in bio_crypt_set_ctx()
121 void __bio_crypt_free_ctx(struct bio *bio) in __bio_crypt_free_ctx() argument
123 mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool); in __bio_crypt_free_ctx()
124 bio->bi_crypt_context = NULL; in __bio_crypt_free_ctx()
127 int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask) in __bio_crypt_clone()
156 void __bio_crypt_advance(struct bio *bio, unsigned int bytes) in __bio_crypt_advance() argument
158 struct bio_crypt_ctx *bc = bio->bi_crypt_context; in __bio_crypt_advance()
205 bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio) in bio_crypt_rq_ctx_compatible() argument
207 return bio_crypt_ctx_compatible(rq->crypt_ctx, bio->bi_crypt_context); in bio_crypt_rq_ctx_compatible()
[all …]
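
bio_crypt_set_ctx() is how inline encryption is requested: a key and starting DUN (data unit number) are attached to the bio before submission, and __bio_crypt_advance() keeps the DUN in step as the bio is split or merged. A sketch of the tagging step, assuming a key already set up via blk_crypto_init_key() and blk_crypto_start_using_key() (tag_bio_encrypted is illustrative):

#include <linux/bio.h>
#include <linux/blk-crypto.h>

/* Sketch: request inline encryption of @bio starting at @dun0. */
static void tag_bio_encrypted(struct bio *bio,
                              const struct blk_crypto_key *key, u64 dun0)
{
        u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { dun0 };

        /* Allocates a bio_crypt_ctx from a mempool; bio_uninit() frees it. */
        bio_crypt_set_ctx(bio, key, dun, GFP_NOIO);
}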
blk-mq-sched.h
12 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
14 bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
34 blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio, in blk_mq_sched_bio_merge() argument
37 if (blk_queue_nomerges(q) || !bio_mergeable(bio)) in blk_mq_sched_bio_merge()
40 return __blk_mq_sched_bio_merge(q, bio, nr_segs); in blk_mq_sched_bio_merge()
45 struct bio *bio) in blk_mq_sched_allow_merge() argument
50 return e->type->ops.allow_merge(q, rq, bio); in blk_mq_sched_allow_merge()
blk-rq-qos.c
32 void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio) in __rq_qos_cleanup() argument
36 rqos->ops->cleanup(rqos, bio); in __rq_qos_cleanup()
68 void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio) in __rq_qos_throttle() argument
72 rqos->ops->throttle(rqos, bio); in __rq_qos_throttle()
77 void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio) in __rq_qos_track() argument
81 rqos->ops->track(rqos, rq, bio); in __rq_qos_track()
86 void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio) in __rq_qos_merge() argument
90 rqos->ops->merge(rqos, rq, bio); in __rq_qos_merge()
95 void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio) in __rq_qos_done_bio() argument
99 rqos->ops->done_bio(rqos, bio); in __rq_qos_done_bio()
blk-throttle.c
388 static inline unsigned int throtl_bio_data_size(struct bio *bio) in throtl_bio_data_size() argument
391 if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) in throtl_bio_data_size()
393 return bio->bi_iter.bi_size; in throtl_bio_data_size()
413 static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn, in throtl_qnode_add_bio() argument
416 bio_list_add(&qn->bios, bio); in throtl_qnode_add_bio()
427 static struct bio *throtl_peek_queued(struct list_head *queued) in throtl_peek_queued()
430 struct bio *bio; in throtl_peek_queued() local
436 bio = bio_list_peek(&qn->bios); in throtl_peek_queued()
437 WARN_ON_ONCE(!bio); in throtl_peek_queued()
438 return bio; in throtl_peek_queued()
[all …]
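
throtl_qnode_add_bio() and throtl_peek_queued() above are built on struct bio_list, the block layer's intrusive singly linked bio queue (it threads through bi_next, so a bio can sit on only one list at a time). The whole API fits in a sketch (drain_demo is illustrative):

#include <linux/bio.h>

static void drain_demo(struct bio *first, struct bio *second)
{
        struct bio_list pending;
        struct bio *bio;

        bio_list_init(&pending);
        bio_list_add(&pending, first);  /* append at the tail */
        bio_list_add(&pending, second);

        /* FIFO drain, the same loop blk-cgroup.c's worker uses below. */
        while ((bio = bio_list_pop(&pending)))
                submit_bio(bio);
}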
blk-zoned.c
190 struct bio *bio = NULL; in blkdev_zone_reset_all_emulated() local
211 bio = blk_next_bio(bio, 0, gfp_mask); in blkdev_zone_reset_all_emulated()
212 bio_set_dev(bio, bdev); in blkdev_zone_reset_all_emulated()
213 bio->bi_opf = REQ_OP_ZONE_RESET | REQ_SYNC; in blkdev_zone_reset_all_emulated()
214 bio->bi_iter.bi_sector = sector; in blkdev_zone_reset_all_emulated()
221 if (bio) { in blkdev_zone_reset_all_emulated()
222 ret = submit_bio_wait(bio); in blkdev_zone_reset_all_emulated()
223 bio_put(bio); in blkdev_zone_reset_all_emulated()
233 struct bio bio; in blkdev_zone_reset_all() local
235 bio_init(&bio, NULL, 0); in blkdev_zone_reset_all()
[all …]
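
blkdev_zone_reset_all_emulated() above chains one REQ_OP_ZONE_RESET bio per zone; callers outside the block layer normally go through blkdev_zone_mgmt() instead. A sketch of resetting a single zone, assuming the 5.x signature that still takes a gfp_t (reset_one_zone is illustrative):

#include <linux/blkdev.h>

/* Sketch: reset the write pointer of the zone starting at @zone_sector. */
static int reset_one_zone(struct block_device *bdev, sector_t zone_sector,
                          sector_t zone_len)
{
        if (!bdev_is_zoned(bdev))
                return -EOPNOTSUPP;

        return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET, zone_sector,
                                zone_len, GFP_KERNEL);
}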
blk-integrity.c
27 int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio) in blk_rq_count_integrity_sg() argument
35 bio_for_each_integrity_vec(iv, bio, iter) { in blk_rq_count_integrity_sg()
68 int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio, in blk_rq_map_integrity_sg() argument
77 bio_for_each_integrity_vec(iv, bio, iter) { in blk_rq_map_integrity_sg()
173 if (bio_integrity(req->bio)->bip_flags != in blk_integrity_merge_rq()
174 bio_integrity(next->bio)->bip_flags) in blk_integrity_merge_rq()
181 if (integrity_req_gap_back_merge(req, next->bio)) in blk_integrity_merge_rq()
188 struct bio *bio) in blk_integrity_merge_bio() argument
191 struct bio *next = bio->bi_next; in blk_integrity_merge_bio()
193 if (blk_integrity_rq(req) == 0 && bio_integrity(bio) == NULL) in blk_integrity_merge_bio()
[all …]
blk-wbt.c
525 static inline bool wbt_should_throttle(struct bio *bio) in wbt_should_throttle() argument
527 switch (bio_op(bio)) { in wbt_should_throttle()
532 if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) == in wbt_should_throttle()
543 static enum wbt_flags bio_to_wbt_flags(struct rq_wb *rwb, struct bio *bio) in bio_to_wbt_flags() argument
550 if (bio_op(bio) == REQ_OP_READ) { in bio_to_wbt_flags()
552 } else if (wbt_should_throttle(bio)) { in bio_to_wbt_flags()
555 if (bio_op(bio) == REQ_OP_DISCARD) in bio_to_wbt_flags()
562 static void wbt_cleanup(struct rq_qos *rqos, struct bio *bio) in wbt_cleanup() argument
565 enum wbt_flags flags = bio_to_wbt_flags(rwb, bio); in wbt_cleanup()
574 static void wbt_wait(struct rq_qos *rqos, struct bio *bio) in wbt_wait() argument
[all …]
blk-ioprio.c
84 static struct ioprio_blkcg *ioprio_blkcg_from_bio(struct bio *bio) in ioprio_blkcg_from_bio() argument
86 struct blkg_policy_data *pd = blkg_to_pd(bio->bi_blkg, &ioprio_policy); in ioprio_blkcg_from_bio()
190 struct bio *bio) in blkcg_ioprio_track() argument
192 struct ioprio_blkcg *blkcg = ioprio_blkcg_from_bio(bio); in blkcg_ioprio_track()
202 bio->bi_ioprio = max_t(u16, bio->bi_ioprio, in blkcg_ioprio_track()
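
blkcg_ioprio_track() above clamps with max_t(): a numerically larger bi_ioprio value means a lower priority class, so the cgroup policy can only demote a bio, never promote it. Setting the bio's own starting priority is one assignment; a sketch (prioritize_bio is illustrative):

#include <linux/bio.h>
#include <linux/ioprio.h>

/* Sketch: mark @bio best-effort, level 0 (the highest BE level). */
static void prioritize_bio(struct bio *bio)
{
        bio->bi_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 0);
}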
blk-cgroup.c
122 struct bio *bio; in blkg_async_bio_workfn() local
137 while ((bio = bio_list_pop(&bios))) in blkg_async_bio_workfn()
138 submit_bio(bio); in blkg_async_bio_workfn()
1575 bool __blkcg_punt_bio_submit(struct bio *bio) in __blkcg_punt_bio_submit() argument
1577 struct blkcg_gq *blkg = bio->bi_blkg; in __blkcg_punt_bio_submit()
1580 bio->bi_opf &= ~REQ_CGROUP_PUNT; in __blkcg_punt_bio_submit()
1587 bio_list_add(&blkg->async_bios, bio); in __blkcg_punt_bio_submit()
1824 static inline struct blkcg_gq *blkg_tryget_closest(struct bio *bio, in blkg_tryget_closest() argument
1831 bio->bi_bdev->bd_disk->queue); in blkg_tryget_closest()
1858 void bio_associate_blkg_from_css(struct bio *bio, in bio_associate_blkg_from_css() argument
[all …]
t10-pi.c
137 struct bio *bio; in t10_pi_type1_prepare() local
139 __rq_for_each_bio(bio, rq) { in t10_pi_type1_prepare()
140 struct bio_integrity_payload *bip = bio_integrity(bio); in t10_pi_type1_prepare()
187 struct bio *bio; in t10_pi_type1_complete() local
189 __rq_for_each_bio(bio, rq) { in t10_pi_type1_complete()
190 struct bio_integrity_payload *bip = bio_integrity(bio); in t10_pi_type1_complete()
elevator.c
60 static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio) in elv_iosched_allow_bio_merge() argument
66 return e->type->ops.allow_merge(q, rq, bio); in elv_iosched_allow_bio_merge()
74 bool elv_bio_merge_ok(struct request *rq, struct bio *bio) in elv_bio_merge_ok() argument
76 if (!blk_rq_merge_ok(rq, bio)) in elv_bio_merge_ok()
79 if (!elv_iosched_allow_bio_merge(rq, bio)) in elv_bio_merge_ok()
304 struct bio *bio) in elv_merge() argument
315 if (blk_queue_nomerges(q) || !bio_mergeable(bio)) in elv_merge()
321 if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) { in elv_merge()
322 enum elv_merge ret = blk_try_merge(q->last_merge, bio); in elv_merge()
336 __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector); in elv_merge()
[all …]
blk-flush.c
127 rq->bio = rq->biotail; in blk_flush_restore_request()
417 BUG_ON(rq->bio != rq->biotail); /*assumes zero or single bio rq */ in blk_insert_flush()
455 struct bio bio; in blkdev_issue_flush() local
457 bio_init(&bio, NULL, 0); in blkdev_issue_flush()
458 bio_set_dev(&bio, bdev); in blkdev_issue_flush()
459 bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; in blkdev_issue_flush()
460 return submit_bio_wait(&bio); in blkdev_issue_flush()
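
blkdev_issue_flush() above is the canonical empty flush: an on-stack bio carrying REQ_OP_WRITE | REQ_PREFLUSH and no payload, pushed through submit_bio_wait(). Callers treat it as a one-line cache barrier; a sketch (flush_device is illustrative):

#include <linux/blkdev.h>

/* Sketch: force the device's volatile write cache to stable media. */
static int flush_device(struct block_device *bdev)
{
        return blkdev_issue_flush(bdev);        /* 0 on success, -errno on error */
}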
blk-iolatency.c
465 static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio) in blkcg_iolatency_throttle() argument
468 struct blkcg_gq *blkg = bio->bi_blkg; in blkcg_iolatency_throttle()
469 bool issue_as_root = bio_issue_as_root_blkg(bio); in blkcg_iolatency_throttle()
483 (bio->bi_opf & REQ_SWAP) == REQ_SWAP); in blkcg_iolatency_throttle()
593 static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio) in blkcg_iolatency_done_bio() argument
600 bool issue_as_root = bio_issue_as_root_blkg(bio); in blkcg_iolatency_done_bio()
603 blkg = bio->bi_blkg; in blkcg_iolatency_done_bio()
604 if (!blkg || !bio_flagged(bio, BIO_QOS_THROTTLED)) in blkcg_iolatency_done_bio()
607 iolat = blkg_to_lat(bio->bi_blkg); in blkcg_iolatency_done_bio()
629 if (iolat->min_lat_nsec && bio->bi_status != BLK_STS_AGAIN) { in blkcg_iolatency_done_bio()
[all …]
