/block/
D | bio.c |
     67  unsigned int sz = sizeof(struct bio) + extra_size;  in bio_find_or_create_slab()
    236  void bio_uninit(struct bio *bio)  in bio_uninit() argument
    239      if (bio->bi_blkg) {  in bio_uninit()
    240          blkg_put(bio->bi_blkg);  in bio_uninit()
    241          bio->bi_blkg = NULL;  in bio_uninit()
    244      if (bio_integrity(bio))  in bio_uninit()
    245          bio_integrity_free(bio);  in bio_uninit()
    247      bio_crypt_free_ctx(bio);  in bio_uninit()
    251  static void bio_free(struct bio *bio)  in bio_free() argument
    253      struct bio_set *bs = bio->bi_pool;  in bio_free()
         [all …]
|
D | blk-map.c |
     46  static int bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)  in bio_copy_from_iter() argument
     51      bio_for_each_segment_all(bvec, bio, iter_all) {  in bio_copy_from_iter()
     77  static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)  in bio_copy_to_iter() argument
     82      bio_for_each_segment_all(bvec, bio, iter_all) {  in bio_copy_to_iter()
    107  static int bio_uncopy_user(struct bio *bio)  in bio_uncopy_user() argument
    109      struct bio_map_data *bmd = bio->bi_private;  in bio_uncopy_user()
    120      else if (bio_data_dir(bio) == READ)  in bio_uncopy_user()
    121          ret = bio_copy_to_iter(bio, bmd->iter);  in bio_uncopy_user()
    123      bio_free_pages(bio);  in bio_uncopy_user()
    126      bio_put(bio);  in bio_uncopy_user()
         [all …]
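The bio_copy_from_iter() loop above is the bounce-copy path for user buffers that cannot be mapped directly: every segment of the bio is filled from (or, in bio_copy_to_iter(), drained back to) a user iov_iter. A minimal sketch of the per-segment copy, assuming the bio's pages were already allocated by the caller:

    #include <linux/bio.h>
    #include <linux/uio.h>

    /* Sketch: fill each bvec page from the source iov_iter; a short
     * copy means the user buffer faulted or was exhausted. */
    static int copy_segments_from_iter(struct bio *bio, struct iov_iter *iter)
    {
            struct bio_vec *bvec;
            struct bvec_iter_all iter_all;

            bio_for_each_segment_all(bvec, bio, iter_all) {
                    ssize_t ret = copy_page_from_iter(bvec->bv_page,
                                                      bvec->bv_offset,
                                                      bvec->bv_len, iter);
                    if (ret < bvec->bv_len)
                            return -EFAULT;
            }
            return 0;
    }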
|
D | blk-merge.c |
     20          struct request *prev_rq, struct bio *prev, struct bio *next)  in bio_will_gap()
     33      bio_get_first_bvec(prev_rq->bio, &pb);  in bio_will_gap()
     55  static inline bool req_gap_back_merge(struct request *req, struct bio *bio)  in req_gap_back_merge() argument
     57      return bio_will_gap(req->q, req, req->biotail, bio);  in req_gap_back_merge()
     60  static inline bool req_gap_front_merge(struct request *req, struct bio *bio)  in req_gap_front_merge() argument
     62      return bio_will_gap(req->q, NULL, bio, req->bio);  in req_gap_front_merge()
     65  static struct bio *blk_bio_discard_split(struct request_queue *q,  in blk_bio_discard_split()
     66          struct bio *bio,  in blk_bio_discard_split() argument
     89      if (bio_sectors(bio) <= max_discard_sectors)  in blk_bio_discard_split()
    100      tmp = bio->bi_iter.bi_sector + split_sectors - alignment;  in blk_bio_discard_split()
         [all …]
|
D | blk-lib.c |
     13  struct bio *blk_next_bio(struct bio *bio, unsigned int nr_pages, gfp_t gfp)  in blk_next_bio() argument
     15      struct bio *new = bio_alloc(gfp, nr_pages);  in blk_next_bio()
     17      if (bio) {  in blk_next_bio()
     18          bio_chain(bio, new);  in blk_next_bio()
     19          submit_bio(bio);  in blk_next_bio()
     27          struct bio **biop)  in __blkdev_issue_discard()
     30      struct bio *bio = *biop;  in __blkdev_issue_discard() local
     97      bio = blk_next_bio(bio, 0, gfp_mask);  in __blkdev_issue_discard()
     98      bio->bi_iter.bi_sector = sector;  in __blkdev_issue_discard()
     99      bio_set_dev(bio, bdev);  in __blkdev_issue_discard()
         [all …]
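blk_next_bio() above (internal to the block layer, declared in block/blk.h) is the chain-and-submit helper behind discard, write-zeroes, and zone management: each previously built bio is chained to a freshly allocated one and submitted, so completion propagates only once the whole chain finishes. A sketch of a caller loop in the style of __blkdev_issue_discard(), assuming the pre-5.18 two-argument bio_alloc() seen in the listing and a caller-supplied max_discard_sectors:

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    /* Hypothetical loop: carve [sector, sector + nr_sects) into
     * device-sized discard bios, chaining each to the next. */
    static void issue_discard_chain(struct block_device *bdev, sector_t sector,
                                    sector_t nr_sects, sector_t max_discard_sectors,
                                    gfp_t gfp_mask, struct bio **biop)
    {
            struct bio *bio = *biop;

            while (nr_sects) {
                    sector_t req_sects = min(nr_sects, max_discard_sectors);

                    bio = blk_next_bio(bio, 0, gfp_mask); /* chains + submits prev */
                    bio->bi_iter.bi_sector = sector;
                    bio_set_dev(bio, bdev);
                    bio->bi_opf = REQ_OP_DISCARD;
                    bio->bi_iter.bi_size = req_sects << 9;

                    sector += req_sects;
                    nr_sects -= req_sects;
            }
            *biop = bio; /* caller submits or waits on the final bio */
    }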
|
D | bounce.c |
    132  static void copy_to_high_bio_irq(struct bio *to, struct bio *from)  in copy_to_high_bio_irq()
    162  static void bounce_end_io(struct bio *bio, mempool_t *pool)  in bounce_end_io() argument
    164      struct bio *bio_orig = bio->bi_private;  in bounce_end_io()
    172      bio_for_each_segment_all(bvec, bio, iter_all) {  in bounce_end_io()
    181      bio_orig->bi_status = bio->bi_status;  in bounce_end_io()
    183      bio_put(bio);  in bounce_end_io()
    186  static void bounce_end_io_write(struct bio *bio)  in bounce_end_io_write() argument
    188      bounce_end_io(bio, &page_pool);  in bounce_end_io_write()
    191  static void bounce_end_io_write_isa(struct bio *bio)  in bounce_end_io_write_isa() argument
    194      bounce_end_io(bio, &isa_page_pool);  in bounce_end_io_write_isa()
         [all …]
|
D | blk-core.c |
    248  static void req_bio_endio(struct request *rq, struct bio *bio,  in req_bio_endio() argument
    252      bio->bi_status = error;  in req_bio_endio()
    255      bio_set_flag(bio, BIO_QUIET);  in req_bio_endio()
    257      bio_advance(bio, nbytes);  in req_bio_endio()
    264      if (bio->bi_iter.bi_size)  in req_bio_endio()
    265          bio->bi_status = BLK_STS_IOERR;  in req_bio_endio()
    267      bio->bi_iter.bi_sector = rq->__sector;  in req_bio_endio()
    271      if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))  in req_bio_endio()
    272          bio_endio(bio);  in req_bio_endio()
    285          rq->bio, rq->biotail, blk_rq_bytes(rq));  in blk_dump_rq_flags()
         [all …]
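req_bio_endio() above advances each bio by the number of bytes the request completed and calls bio_endio() once a bio is fully consumed; bio_endio() in turn invokes the submitter's ->bi_end_io. A sketch of a typical completion handler on the submitting side (handler name and the completion-based wakeup are hypothetical):

    #include <linux/bio.h>
    #include <linux/blkdev.h>
    #include <linux/completion.h>

    /* Hypothetical ->bi_end_io: record status, wake the submitter,
     * and drop our reference; called once the whole bio has finished. */
    static void my_end_io(struct bio *bio)
    {
            struct completion *done = bio->bi_private;

            if (bio->bi_status)
                    pr_err("bio failed: %d\n",
                           blk_status_to_errno(bio->bi_status));
            complete(done);
            bio_put(bio);
    }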
|
D | blk-crypto-internal.h |
     26  bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio);
     32          struct bio *bio)  in bio_crypt_ctx_back_mergeable() argument
     35              bio->bi_crypt_context);  in bio_crypt_ctx_back_mergeable()
     39          struct bio *bio)  in bio_crypt_ctx_front_mergeable() argument
     41      return bio_crypt_ctx_mergeable(bio->bi_crypt_context,  in bio_crypt_ctx_front_mergeable()
     42              bio->bi_iter.bi_size, req->crypt_ctx);  in bio_crypt_ctx_front_mergeable()
     71          struct bio *bio)  in bio_crypt_rq_ctx_compatible() argument
     77          struct bio *bio)  in bio_crypt_ctx_front_mergeable() argument
     83          struct bio *bio)  in bio_crypt_ctx_back_mergeable() argument
    108  void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
         [all …]
|
D | bio-integrity.c |
     50  struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,  in bio_integrity_alloc() argument
     55      struct bio_set *bs = bio->bi_pool;  in bio_integrity_alloc()
     58      if (WARN_ON_ONCE(bio_has_crypt_ctx(bio)))  in bio_integrity_alloc()
     88      bip->bip_bio = bio;  in bio_integrity_alloc()
     89      bio->bi_integrity = bip;  in bio_integrity_alloc()
     90      bio->bi_opf |= REQ_INTEGRITY;  in bio_integrity_alloc()
    106  void bio_integrity_free(struct bio *bio)  in bio_integrity_free() argument
    108      struct bio_integrity_payload *bip = bio_integrity(bio);  in bio_integrity_free()
    109      struct bio_set *bs = bio->bi_pool;  in bio_integrity_free()
    116      bio->bi_integrity = NULL;  in bio_integrity_free()
         [all …]
|
D | blk.h |
     90  static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,  in blk_rq_bio_prep() argument
     94      rq->__data_len = bio->bi_iter.bi_size;  in blk_rq_bio_prep()
     95      rq->bio = rq->biotail = bio;  in blk_rq_bio_prep()
     96      rq->ioprio = bio_prio(bio);  in blk_rq_bio_prep()
     98      if (bio->bi_disk)  in blk_rq_bio_prep()
     99          rq->rq_disk = bio->bi_disk;  in blk_rq_bio_prep()
    104  bool __bio_integrity_endio(struct bio *);
    105  void bio_integrity_free(struct bio *bio);
    106  static inline bool bio_integrity_endio(struct bio *bio)  in bio_integrity_endio() argument
    108      if (bio_integrity(bio))  in bio_integrity_endio()
         [all …]
|
D | blk-crypto-fallback.c |
     51      struct bio *bio;  member
    146  static void blk_crypto_fallback_encrypt_endio(struct bio *enc_bio)  in blk_crypto_fallback_encrypt_endio()
    148      struct bio *src_bio = enc_bio->bi_private;  in blk_crypto_fallback_encrypt_endio()
    161  static struct bio *blk_crypto_clone_bio(struct bio *bio_src)  in blk_crypto_clone_bio()
    165      struct bio *bio;  in blk_crypto_clone_bio() local
    167      bio = bio_alloc_bioset(GFP_NOIO, bio_segments(bio_src), NULL);  in blk_crypto_clone_bio()
    168      if (!bio)  in blk_crypto_clone_bio()
    170      bio->bi_disk = bio_src->bi_disk;  in blk_crypto_clone_bio()
    171      bio->bi_opf = bio_src->bi_opf;  in blk_crypto_clone_bio()
    172      bio->bi_ioprio = bio_src->bi_ioprio;  in blk_crypto_clone_bio()
         [all …]
|
D | blk-rq-qos.h |
     39      void (*throttle)(struct rq_qos *, struct bio *);
     40      void (*track)(struct rq_qos *, struct request *, struct bio *);
     41      void (*merge)(struct rq_qos *, struct request *, struct bio *);
     45      void (*done_bio)(struct rq_qos *, struct bio *);
     46      void (*cleanup)(struct rq_qos *, struct bio *);
    146  void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio);
    150  void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio);
    151  void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio);
    152  void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio);
    153  void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio);
         [all …]
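This ops table is how an rq-qos policy observes a bio's life cycle: throttle before the bio enters the queue, track and merge when it joins a request, done_bio at completion, and cleanup if it never got a request. A sketch of a hypothetical policy filling in the hooks (only the struct and its field names come from the header above):

    #include <linux/blkdev.h>
    #include "blk-rq-qos.h"   /* block-layer internal header */

    /* Hypothetical callbacks for a toy QoS policy. */
    static void my_throttle(struct rq_qos *rqos, struct bio *bio)
    {
            /* sleep here (e.g. on a waitqueue) until the bio may proceed */
    }

    static void my_done_bio(struct rq_qos *rqos, struct bio *bio)
    {
            /* account completion, e.g. sample latency for this bio */
    }

    static struct rq_qos_ops my_rqos_ops = {
            .throttle = my_throttle,
            .done_bio = my_done_bio,
            /* hooks left NULL are skipped by the __rq_qos_*() walkers */
    };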
|
D | blk-crypto.c |
     82  void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key,  in bio_crypt_set_ctx() argument
     98      bio->bi_crypt_context = bc;  in bio_crypt_set_ctx()
    102  void __bio_crypt_free_ctx(struct bio *bio)  in __bio_crypt_free_ctx() argument
    104      mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool);  in __bio_crypt_free_ctx()
    105      bio->bi_crypt_context = NULL;  in __bio_crypt_free_ctx()
    108  int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)  in __bio_crypt_clone()
    137  void __bio_crypt_advance(struct bio *bio, unsigned int bytes)  in __bio_crypt_advance() argument
    139      struct bio_crypt_ctx *bc = bio->bi_crypt_context;  in __bio_crypt_advance()
    186  bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio)  in bio_crypt_rq_ctx_compatible() argument
    188      return bio_crypt_ctx_compatible(rq->crypt_ctx, bio->bi_crypt_context);  in bio_crypt_rq_ctx_compatible()
         [all …]
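bio_crypt_set_ctx() attaches an inline-encryption context, a key plus a starting data-unit number (DUN), to a bio; __bio_crypt_advance() then moves the DUN forward as the bio splits or progresses. A sketch of a filesystem-side caller, assuming key was prepared elsewhere with blk_crypto_init_key() and that the file's logical block number serves as the DUN:

    #include <linux/bio.h>
    #include <linux/blk-crypto.h>

    /* Hypothetical helper: tag a bio for inline encryption, deriving
     * the data-unit number from the logical block number. */
    static void tag_bio_for_encryption(struct bio *bio,
                                       const struct blk_crypto_key *key,
                                       u64 lblk)
    {
            u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { lblk };

            bio_crypt_set_ctx(bio, key, dun, GFP_NOIO);
    }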
|
D | blk-mq-sched.h |
     11  bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
     13  bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,
     32  blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio,  in blk_mq_sched_bio_merge() argument
     35      if (blk_queue_nomerges(q) || !bio_mergeable(bio))  in blk_mq_sched_bio_merge()
     38      return __blk_mq_sched_bio_merge(q, bio, nr_segs);  in blk_mq_sched_bio_merge()
     43          struct bio *bio)  in blk_mq_sched_allow_merge() argument
     48      return e->type->ops.allow_merge(q, rq, bio);  in blk_mq_sched_allow_merge()
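blk_mq_sched_allow_merge() gives the active scheduler a veto over folding a bio into an existing request via ops.allow_merge. A sketch of such a hook with a hypothetical policy (refuse merges across cgroups; assumes CONFIG_BLK_CGROUP so bios carry bi_blkg):

    #include <linux/blkdev.h>

    /* Hypothetical .allow_merge: only merge a bio into a request
     * that belongs to the same blkcg. */
    static bool my_allow_merge(struct request_queue *q, struct request *rq,
                               struct bio *bio)
    {
            return rq->bio && rq->bio->bi_blkg == bio->bi_blkg;
    }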
|
D | blk-integrity.c |
     27  int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)  in blk_rq_count_integrity_sg() argument
     35      bio_for_each_integrity_vec(iv, bio, iter) {  in blk_rq_count_integrity_sg()
     68  int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,  in blk_rq_map_integrity_sg() argument
     77      bio_for_each_integrity_vec(iv, bio, iter) {  in blk_rq_map_integrity_sg()
    173      if (bio_integrity(req->bio)->bip_flags !=  in blk_integrity_merge_rq()
    174              bio_integrity(next->bio)->bip_flags)  in blk_integrity_merge_rq()
    181      if (integrity_req_gap_back_merge(req, next->bio))  in blk_integrity_merge_rq()
    188          struct bio *bio)  in blk_integrity_merge_bio() argument
    191      struct bio *next = bio->bi_next;  in blk_integrity_merge_bio()
    193      if (blk_integrity_rq(req) == 0 && bio_integrity(bio) == NULL)  in blk_integrity_merge_bio()
         [all …]
|
D | blk-rq-qos.c |
     32  void __rq_qos_cleanup(struct rq_qos *rqos, struct bio *bio)  in __rq_qos_cleanup() argument
     36          rqos->ops->cleanup(rqos, bio);  in __rq_qos_cleanup()
     68  void __rq_qos_throttle(struct rq_qos *rqos, struct bio *bio)  in __rq_qos_throttle() argument
     72          rqos->ops->throttle(rqos, bio);  in __rq_qos_throttle()
     77  void __rq_qos_track(struct rq_qos *rqos, struct request *rq, struct bio *bio)  in __rq_qos_track() argument
     81          rqos->ops->track(rqos, rq, bio);  in __rq_qos_track()
     86  void __rq_qos_merge(struct rq_qos *rqos, struct request *rq, struct bio *bio)  in __rq_qos_merge() argument
     90          rqos->ops->merge(rqos, rq, bio);  in __rq_qos_merge()
     95  void __rq_qos_done_bio(struct rq_qos *rqos, struct bio *bio)  in __rq_qos_done_bio() argument
     99          rqos->ops->done_bio(rqos, bio);  in __rq_qos_done_bio()
|
D | blk-throttle.c |
    388  static inline unsigned int throtl_bio_data_size(struct bio *bio)  in throtl_bio_data_size() argument
    391      if (unlikely(bio_op(bio) == REQ_OP_DISCARD))  in throtl_bio_data_size()
    393      return bio->bi_iter.bi_size;  in throtl_bio_data_size()
    413  static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,  in throtl_qnode_add_bio() argument
    416      bio_list_add(&qn->bios, bio);  in throtl_qnode_add_bio()
    427  static struct bio *throtl_peek_queued(struct list_head *queued)  in throtl_peek_queued()
    430      struct bio *bio;  in throtl_peek_queued() local
    436      bio = bio_list_peek(&qn->bios);  in throtl_peek_queued()
    437      WARN_ON_ONCE(!bio);  in throtl_peek_queued()
    438      return bio;  in throtl_peek_queued()
         [all …]
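The qnode helpers above queue throttled bios on a struct bio_list, a singly linked FIFO threaded through each bio's bi_next pointer, so enqueue, peek, and pop are O(1) with no extra allocation. A sketch of the same primitives in isolation (function names hypothetical):

    #include <linux/bio.h>

    /* Hypothetical queue/drain pair built on the primitives the
     * throttle qnodes use. */
    static void queue_throttled(struct bio_list *pending, struct bio *bio)
    {
            bio_list_add(pending, bio);             /* enqueue at tail */
    }

    static void dispatch_throttled(struct bio_list *pending)
    {
            struct bio *bio;

            while ((bio = bio_list_pop(pending)))   /* dequeue from head */
                    submit_bio(bio);
    }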
|
D | blk-wbt.c |
    522  static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)  in wbt_should_throttle() argument
    524      switch (bio_op(bio)) {  in wbt_should_throttle()
    529      if ((bio->bi_opf & (REQ_SYNC | REQ_IDLE)) ==  in wbt_should_throttle()
    540  static enum wbt_flags bio_to_wbt_flags(struct rq_wb *rwb, struct bio *bio)  in bio_to_wbt_flags() argument
    547      if (bio_op(bio) == REQ_OP_READ) {  in bio_to_wbt_flags()
    549      } else if (wbt_should_throttle(rwb, bio)) {  in bio_to_wbt_flags()
    552          if (bio_op(bio) == REQ_OP_DISCARD)  in bio_to_wbt_flags()
    559  static void wbt_cleanup(struct rq_qos *rqos, struct bio *bio)  in wbt_cleanup() argument
    562      enum wbt_flags flags = bio_to_wbt_flags(rwb, bio);  in wbt_cleanup()
    572  static void wbt_wait(struct rq_qos *rqos, struct bio *bio)  in wbt_wait() argument
         [all …]
|
D | blk-cgroup.c |
    122      struct bio *bio;  in blkg_async_bio_workfn() local
    137      while ((bio = bio_list_pop(&bios)))  in blkg_async_bio_workfn()
    138          submit_bio(bio);  in blkg_async_bio_workfn()
   1578  bool __blkcg_punt_bio_submit(struct bio *bio)  in __blkcg_punt_bio_submit() argument
   1580      struct blkcg_gq *blkg = bio->bi_blkg;  in __blkcg_punt_bio_submit()
   1583      bio->bi_opf &= ~REQ_CGROUP_PUNT;  in __blkcg_punt_bio_submit()
   1590      bio_list_add(&blkg->async_bios, bio);  in __blkcg_punt_bio_submit()
   1825  static inline struct blkcg_gq *blkg_tryget_closest(struct bio *bio,  in blkg_tryget_closest() argument
   1831      blkg = blkg_lookup_create(css_to_blkcg(css), bio->bi_disk->queue);  in blkg_tryget_closest()
   1858  void bio_associate_blkg_from_css(struct bio *bio,  in bio_associate_blkg_from_css() argument
         [all …]
|
D | blk-ioprio.c |
     84  static struct ioprio_blkcg *ioprio_blkcg_from_bio(struct bio *bio)  in ioprio_blkcg_from_bio() argument
     86      struct blkg_policy_data *pd = blkg_to_pd(bio->bi_blkg, &ioprio_policy);  in ioprio_blkcg_from_bio()
    190          struct bio *bio)  in blkcg_ioprio_track() argument
    192      struct ioprio_blkcg *blkcg = ioprio_blkcg_from_bio(bio);  in blkcg_ioprio_track()
    202      bio->bi_ioprio = max_t(u16, bio->bi_ioprio,  in blkcg_ioprio_track()
|
D | t10-pi.c |
    137      struct bio *bio;  in t10_pi_type1_prepare() local
    139      __rq_for_each_bio(bio, rq) {  in t10_pi_type1_prepare()
    140          struct bio_integrity_payload *bip = bio_integrity(bio);  in t10_pi_type1_prepare()
    189      struct bio *bio;  in t10_pi_type1_complete() local
    191      __rq_for_each_bio(bio, rq) {  in t10_pi_type1_complete()
    192          struct bio_integrity_payload *bip = bio_integrity(bio);  in t10_pi_type1_complete()
|
D | elevator.c |
     60  static int elv_iosched_allow_bio_merge(struct request *rq, struct bio *bio)  in elv_iosched_allow_bio_merge() argument
     66      return e->type->ops.allow_merge(q, rq, bio);  in elv_iosched_allow_bio_merge()
     74  bool elv_bio_merge_ok(struct request *rq, struct bio *bio)  in elv_bio_merge_ok() argument
     76      if (!blk_rq_merge_ok(rq, bio))  in elv_bio_merge_ok()
     79      if (!elv_iosched_allow_bio_merge(rq, bio))  in elv_bio_merge_ok()
    304          struct bio *bio)  in elv_merge() argument
    315      if (blk_queue_nomerges(q) || !bio_mergeable(bio))  in elv_merge()
    321      if (q->last_merge && elv_bio_merge_ok(q->last_merge, bio)) {  in elv_merge()
    322          enum elv_merge ret = blk_try_merge(q->last_merge, bio);  in elv_merge()
    336      __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);  in elv_merge()
         [all …]
|
D | blk-flush.c |
    128      rq->bio = rq->biotail;  in blk_flush_restore_request()
    418      BUG_ON(rq->bio != rq->biotail);  /* assumes zero or single bio rq */  in blk_insert_flush()
    457      struct bio *bio;  in blkdev_issue_flush() local
    460      bio = bio_alloc(gfp_mask, 0);  in blkdev_issue_flush()
    461      bio_set_dev(bio, bdev);  in blkdev_issue_flush()
    462      bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;  in blkdev_issue_flush()
    464      ret = submit_bio_wait(bio);  in blkdev_issue_flush()
    465      bio_put(bio);  in blkdev_issue_flush()
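blkdev_issue_flush() above is the minimal recipe for flushing a device's volatile write cache: an empty WRITE bio flagged REQ_PREFLUSH, submitted synchronously. A sketch of the same pattern as a standalone helper (wrapper name hypothetical; again the pre-5.18 bio_alloc() signature from the listing):

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    /* Hypothetical wrapper: flush bdev's write cache, return an errno. */
    static int flush_write_cache(struct block_device *bdev, gfp_t gfp_mask)
    {
            struct bio *bio = bio_alloc(gfp_mask, 0);  /* no data pages */
            int ret;

            bio_set_dev(bio, bdev);
            bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
            ret = submit_bio_wait(bio);  /* sleeps until the flush completes */
            bio_put(bio);
            return ret;
    }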
|
D | blk-zoned.c |
    210      struct bio *bio = NULL;  in blkdev_zone_mgmt() local
    234      bio = blk_next_bio(bio, 0, gfp_mask);  in blkdev_zone_mgmt()
    235      bio_set_dev(bio, bdev);  in blkdev_zone_mgmt()
    243          bio->bi_opf = REQ_OP_ZONE_RESET_ALL | REQ_SYNC;  in blkdev_zone_mgmt()
    247      bio->bi_opf = op | REQ_SYNC;  in blkdev_zone_mgmt()
    248      bio->bi_iter.bi_sector = sector;  in blkdev_zone_mgmt()
    255      ret = submit_bio_wait(bio);  in blkdev_zone_mgmt()
    256      bio_put(bio);  in blkdev_zone_mgmt()
|
D | mq-deadline-cgroup.h |
     81  struct dd_blkcg *dd_blkcg_from_bio(struct bio *bio);
     89  static inline struct dd_blkcg *dd_blkcg_from_bio(struct bio *bio)  in dd_blkcg_from_bio() argument
|
D | blk-iolatency.c |
    465  static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio)  in blkcg_iolatency_throttle() argument
    468      struct blkcg_gq *blkg = bio->bi_blkg;  in blkcg_iolatency_throttle()
    469      bool issue_as_root = bio_issue_as_root_blkg(bio);  in blkcg_iolatency_throttle()
    483          (bio->bi_opf & REQ_SWAP) == REQ_SWAP);  in blkcg_iolatency_throttle()
    593  static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)  in blkcg_iolatency_done_bio() argument
    600      bool issue_as_root = bio_issue_as_root_blkg(bio);  in blkcg_iolatency_done_bio()
    603      blkg = bio->bi_blkg;  in blkcg_iolatency_done_bio()
    604      if (!blkg || !bio_flagged(bio, BIO_TRACKED))  in blkcg_iolatency_done_bio()
    607      iolat = blkg_to_lat(bio->bi_blkg);  in blkcg_iolatency_done_bio()
    629      if (iolat->min_lat_nsec && bio->bi_status != BLK_STS_AGAIN) {  in blkcg_iolatency_done_bio()
         [all …]
|