/block/
D | bio.c |
      73  unsigned int sz = sizeof(struct bio) + extra_size;  in bio_find_or_create_slab()
     237  static void __bio_free(struct bio *bio)  in __bio_free() argument
     239  bio_disassociate_task(bio);  in __bio_free()
     241  if (bio_integrity(bio))  in __bio_free()
     242  bio_integrity_free(bio);  in __bio_free()
     245  static void bio_free(struct bio *bio)  in bio_free() argument
     247  struct bio_set *bs = bio->bi_pool;  in bio_free()
     250  __bio_free(bio);  in bio_free()
     253  if (bio_flagged(bio, BIO_OWNS_VEC))  in bio_free()
     254  bvec_free(bs->bvec_pool, bio->bi_io_vec, BIO_POOL_IDX(bio));  in bio_free()
          [all …]

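The bio.c hits above trace the allocate/free pairing: a bio allocated from a bio_set records its pool in bio->bi_pool, and the final bio_put() reaches bio_free(), which returns it there and releases the bvec array when the bio owns it (BIO_OWNS_VEC). Below is a minimal sketch of that pairing from the caller's side, assuming the ~v4.5-era API this listing reflects; the demo_* names are hypothetical, not from this tree.

    #include <linux/bio.h>
    #include <linux/init.h>

    static struct bio_set *demo_bs;     /* hypothetical module-local pool */

    static int __init demo_setup(void)
    {
            /* 64 pre-allocated bios, no front padding */
            demo_bs = bioset_create(64, 0);
            return demo_bs ? 0 : -ENOMEM;
    }

    static int demo_alloc_and_release(void)
    {
            struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, demo_bs);

            if (!bio)
                    return -ENOMEM;

            /* bio->bi_pool now points at demo_bs; fill and submit here ... */

            bio_put(bio);   /* last reference: bio_free() hands it back to demo_bs */
            return 0;
    }
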
D | blk-merge.c |
      12  static struct bio *blk_bio_discard_split(struct request_queue *q,  in blk_bio_discard_split()
      13  struct bio *bio,  in blk_bio_discard_split() argument
      35  if (bio_sectors(bio) <= max_discard_sectors)  in blk_bio_discard_split()
      46  tmp = bio->bi_iter.bi_sector + split_sectors - alignment;  in blk_bio_discard_split()
      52  return bio_split(bio, split_sectors, GFP_NOIO, bs);  in blk_bio_discard_split()
      55  static struct bio *blk_bio_write_same_split(struct request_queue *q,  in blk_bio_write_same_split()
      56  struct bio *bio,  in blk_bio_write_same_split() argument
      65  if (bio_sectors(bio) <= q->limits.max_write_same_sectors)  in blk_bio_write_same_split()
      68  return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);  in blk_bio_write_same_split()
      72  struct bio *bio)  in get_max_io_size() argument
          [all …]

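blk_bio_discard_split() and blk_bio_write_same_split() above share one shape: if the bio fits within the queue limit, pass it through untouched; otherwise carve the front off with bio_split(). A hedged sketch of that shape follows; demo_split_front() is a made-up name, while bio_split() has had this four-argument form since v3.14.

    #include <linux/bio.h>

    static struct bio *demo_split_front(struct bio *bio,
                                        unsigned int max_sectors,
                                        struct bio_set *bs)
    {
            if (bio_sectors(bio) <= max_sectors)
                    return NULL;            /* fits as-is: nothing to split */

            /* returns a new bio covering the first max_sectors;
             * @bio itself is advanced to cover the remainder */
            return bio_split(bio, max_sectors, GFP_NOIO, bs);
    }
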
D | blk-map.c |
      31  struct bio *bio)  in blk_rq_append_bio() argument
      33  if (!rq->bio)  in blk_rq_append_bio()
      34  blk_rq_bio_prep(q, rq, bio);  in blk_rq_append_bio()
      35  else if (!ll_back_merge_fn(q, rq, bio))  in blk_rq_append_bio()
      38  rq->biotail->bi_next = bio;  in blk_rq_append_bio()
      39  rq->biotail = bio;  in blk_rq_append_bio()
      41  rq->__data_len += bio->bi_iter.bi_size;  in blk_rq_append_bio()
      46  static int __blk_rq_unmap_user(struct bio *bio)  in __blk_rq_unmap_user() argument
      50  if (bio) {  in __blk_rq_unmap_user()
      51  if (bio_flagged(bio, BIO_USER_MAPPED))  in __blk_rq_unmap_user()
          [all …]

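The blk_rq_append_bio() lines above show the tail-append step: a request keeps a singly linked bio chain, so a new bio is hooked in after rq->biotail and its byte count is added to the request's length. Sketched in isolation below, for illustration only; biotail and __data_len are block-layer internals, and demo_append() is a hypothetical name.

    #include <linux/blkdev.h>

    /* caller guarantees rq->bio is already set (the !rq->bio case above
     * goes through blk_rq_bio_prep() instead) */
    static void demo_append(struct request *rq, struct bio *bio)
    {
            rq->biotail->bi_next = bio;     /* link after the current tail */
            rq->biotail = bio;              /* bio becomes the new tail */
            rq->__data_len += bio->bi_iter.bi_size;
    }
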
D | bio-integrity.c |
      50  struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,  in bio_integrity_alloc() argument
      55  struct bio_set *bs = bio->bi_pool;  in bio_integrity_alloc()
      85  bip->bip_bio = bio;  in bio_integrity_alloc()
      86  bio->bi_integrity = bip;  in bio_integrity_alloc()
      87  bio->bi_rw |= REQ_INTEGRITY;  in bio_integrity_alloc()
     103  void bio_integrity_free(struct bio *bio)  in bio_integrity_free() argument
     105  struct bio_integrity_payload *bip = bio_integrity(bio);  in bio_integrity_free()
     106  struct bio_set *bs = bio->bi_pool;  in bio_integrity_free()
     122  bio->bi_integrity = NULL;  in bio_integrity_free()
     135  int bio_integrity_add_page(struct bio *bio, struct page *page,  in bio_integrity_add_page() argument
          [all …]

D | bounce.c |
     102  static void copy_to_high_bio_irq(struct bio *to, struct bio *from)  in copy_to_high_bio_irq()
     126  static void bounce_end_io(struct bio *bio, mempool_t *pool)  in bounce_end_io() argument
     128  struct bio *bio_orig = bio->bi_private;  in bounce_end_io()
     136  bio_for_each_segment_all(bvec, bio, i) {  in bounce_end_io()
     146  bio_orig->bi_error = bio->bi_error;  in bounce_end_io()
     148  bio_put(bio);  in bounce_end_io()
     151  static void bounce_end_io_write(struct bio *bio)  in bounce_end_io_write() argument
     153  bounce_end_io(bio, page_pool);  in bounce_end_io_write()
     156  static void bounce_end_io_write_isa(struct bio *bio)  in bounce_end_io_write_isa() argument
     159  bounce_end_io(bio, isa_page_pool);  in bounce_end_io_write_isa()
          [all …]

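bounce_end_io() above walks every segment of the completed bio with bio_for_each_segment_all(), which iterates bi_io_vec directly and is therefore only safe on a bio the caller owns outright, not on a partially advanced clone. A small sketch of the same walk, using the ~v4.5 form of the macro; demo_put_pages() is hypothetical.

    #include <linux/bio.h>
    #include <linux/mm.h>

    static void demo_put_pages(struct bio *bio)
    {
            struct bio_vec *bvec;
            int i;

            bio_for_each_segment_all(bvec, bio, i)
                    put_page(bvec->bv_page);        /* drop each segment's page ref */
    }
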
D | blk-lib.c |
      18  static void bio_batch_end_io(struct bio *bio)  in bio_batch_end_io() argument
      20  struct bio_batch *bb = bio->bi_private;  in bio_batch_end_io()
      22  if (bio->bi_error && bio->bi_error != -EOPNOTSUPP)  in bio_batch_end_io()
      23  bb->error = bio->bi_error;  in bio_batch_end_io()
      26  bio_put(bio);  in bio_batch_end_io()
      49  struct bio *bio;  in blkdev_issue_discard() local
      78  bio = bio_alloc(gfp_mask, 1);  in blkdev_issue_discard()
      79  if (!bio) {  in blkdev_issue_discard()
     101  bio->bi_iter.bi_sector = sector;  in blkdev_issue_discard()
     102  bio->bi_end_io = bio_batch_end_io;  in blkdev_issue_discard()
          [all …]

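blkdev_issue_discard() above uses the bio_batch idiom: each submitted bio carries a pointer to a shared batch in bi_private, the end_io callback records errors and drops the pending count, and the submitter sleeps on a completion until the count reaches zero. A condensed sketch under the ~v4.5 API shown in this listing (bio->bi_error, two-argument submit_bio()); the demo_* names are mine, not the kernel's.

    #include <linux/bio.h>
    #include <linux/blkdev.h>
    #include <linux/completion.h>

    struct demo_batch {
            atomic_t                pending;
            int                     error;
            struct completion       *done;
    };

    static void demo_end_io(struct bio *bio)
    {
            struct demo_batch *bb = bio->bi_private;

            if (bio->bi_error && bio->bi_error != -EOPNOTSUPP)
                    bb->error = bio->bi_error;      /* remember any hard error */
            if (atomic_dec_and_test(&bb->pending))
                    complete(bb->done);
            bio_put(bio);
    }

    static int demo_submit_all(struct block_device *bdev, struct bio **bios, int n)
    {
            DECLARE_COMPLETION_ONSTACK(wait);
            struct demo_batch bb = { .error = 0, .done = &wait };
            int i;

            atomic_set(&bb.pending, n);
            for (i = 0; i < n; i++) {
                    bios[i]->bi_bdev = bdev;
                    bios[i]->bi_private = &bb;
                    bios[i]->bi_end_io = demo_end_io;
                    submit_bio(WRITE, bios[i]);
            }
            wait_for_completion_io(&wait);
            return bb.error;
    }
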
D | blk-core.c |
     144  static void req_bio_endio(struct request *rq, struct bio *bio,  in req_bio_endio() argument
     148  bio->bi_error = error;  in req_bio_endio()
     151  bio_set_flag(bio, BIO_QUIET);  in req_bio_endio()
     153  bio_advance(bio, nbytes);  in req_bio_endio()
     156  if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))  in req_bio_endio()
     157  bio_endio(bio);  in req_bio_endio()
     172  rq->bio, rq->biotail, blk_rq_bytes(rq));  in blk_dump_rq_flags()
     831  static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio);
    1019  static bool blk_rq_should_init_elevator(struct bio *bio)  in blk_rq_should_init_elevator() argument
    1021  if (!bio)  in blk_rq_should_init_elevator()
          [all …]

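req_bio_endio() above is the partial-completion step: record the error, advance the bio's iterator past the bytes just finished, and call bio_endio() only once bi_size reaches zero. The same step in isolation; demo_complete_bytes() is a hypothetical name, and the real function additionally skips bio_endio() for REQ_FLUSH_SEQ requests.

    #include <linux/bio.h>

    static void demo_complete_bytes(struct bio *bio, unsigned int nbytes, int error)
    {
            bio->bi_error = error;
            bio_advance(bio, nbytes);       /* move bi_iter past the finished bytes */

            if (bio->bi_iter.bi_size == 0)  /* fully drained: signal completion */
                    bio_endio(bio);
    }
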
D | blk-throttle.c |
     250  static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,  in throtl_qnode_add_bio() argument
     253  bio_list_add(&qn->bios, bio);  in throtl_qnode_add_bio()
     264  static struct bio *throtl_peek_queued(struct list_head *queued)  in throtl_peek_queued()
     267  struct bio *bio;  in throtl_peek_queued() local
     272  bio = bio_list_peek(&qn->bios);  in throtl_peek_queued()
     273  WARN_ON_ONCE(!bio);  in throtl_peek_queued()
     274  return bio;  in throtl_peek_queued()
     291  static struct bio *throtl_pop_queued(struct list_head *queued,  in throtl_pop_queued()
     295  struct bio *bio;  in throtl_pop_queued() local
     300  bio = bio_list_pop(&qn->bios);  in throtl_pop_queued()
          [all …]

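The throtl_qnode helpers above are thin wrappers around the stock bio_list FIFO from <linux/bio.h>. A self-contained sketch of the same add/peek/pop pattern; demo_biolist() is hypothetical.

    #include <linux/bio.h>

    static void demo_biolist(struct bio *a, struct bio *b)
    {
            struct bio_list list;
            struct bio *bio;

            bio_list_init(&list);
            bio_list_add(&list, a);         /* append at the tail */
            bio_list_add(&list, b);

            bio = bio_list_peek(&list);     /* inspect the head, leave it queued */
            WARN_ON_ONCE(bio != a);

            while ((bio = bio_list_pop(&list)))     /* drain in FIFO order */
                    generic_make_request(bio);
    }
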
D | blk.h |
      64  void init_request_from_bio(struct request *req, struct bio *bio);
      66  struct bio *bio);
      68  struct bio *bio);
     103  struct bio *bio);
     105  struct bio *bio);
     106  bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
     210  struct bio *bio);
     212  struct bio *bio);
     219  bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
     220  int blk_try_merge(struct request *rq, struct bio *bio);

D | blk-integrity.c |
      41  int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)  in blk_rq_count_integrity_sg() argument
      49  bio_for_each_integrity_vec(iv, bio, iter) {  in blk_rq_count_integrity_sg()
      86  int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,  in blk_rq_map_integrity_sg() argument
      95  bio_for_each_integrity_vec(iv, bio, iter) {  in blk_rq_map_integrity_sg()
     195  if (bio_integrity(req->bio)->bip_flags !=  in blk_integrity_merge_rq()
     196  bio_integrity(next->bio)->bip_flags)  in blk_integrity_merge_rq()
     203  if (integrity_req_gap_back_merge(req, next->bio))  in blk_integrity_merge_rq()
     211  struct bio *bio)  in blk_integrity_merge_bio() argument
     214  struct bio *next = bio->bi_next;  in blk_integrity_merge_bio()
     216  if (blk_integrity_rq(req) == 0 && bio_integrity(bio) == NULL)  in blk_integrity_merge_bio()
          [all …]

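blk_rq_count_integrity_sg() above sizes the integrity scatterlist by iterating the bio's integrity payload with bio_for_each_integrity_vec(). A sketch of that walk, assuming the bvec_iter-based form of the macro used in this era; the demo name is mine.

    #include <linux/bio.h>

    static unsigned int demo_count_integrity_segs(struct bio *bio)
    {
            struct bio_vec iv;
            struct bvec_iter iter;
            unsigned int segs = 0;

            bio_for_each_integrity_vec(iv, bio, iter)
                    segs++;         /* upper bound: the real code coalesces adjacent vecs */
            return segs;
    }
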
D | blk-mq.c |
     670  struct blk_mq_ctx *ctx, struct bio *bio)  in blk_mq_attempt_merge() argument
     681  if (!blk_rq_merge_ok(rq, bio))  in blk_mq_attempt_merge()
     684  el_ret = blk_try_merge(rq, bio);  in blk_mq_attempt_merge()
     686  if (bio_attempt_back_merge(q, rq, bio)) {  in blk_mq_attempt_merge()
     692  if (bio_attempt_front_merge(q, rq, bio)) {  in blk_mq_attempt_merge()
    1132  static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)  in blk_mq_bio_to_request() argument
    1134  init_request_from_bio(rq, bio);  in blk_mq_bio_to_request()
    1148  struct request *rq, struct bio *bio)  in blk_mq_merge_queue_io() argument
    1150  if (!hctx_allow_merges(hctx) || !bio_mergeable(bio)) {  in blk_mq_merge_queue_io()
    1151  blk_mq_bio_to_request(rq, bio);  in blk_mq_merge_queue_io()
          [all …]

D | elevator.c |
      56  static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)  in elv_iosched_allow_merge() argument
      62  return e->type->ops.elevator_allow_merge_fn(q, rq, bio);  in elv_iosched_allow_merge()
      70  bool elv_rq_merge_ok(struct request *rq, struct bio *bio)  in elv_rq_merge_ok() argument
      72  if (!blk_rq_merge_ok(rq, bio))  in elv_rq_merge_ok()
      75  if (!elv_iosched_allow_merge(rq, bio))  in elv_rq_merge_ok()
     411  int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)  in elv_merge() argument
     423  if (blk_queue_nomerges(q) || !bio_mergeable(bio))  in elv_merge()
     429  if (q->last_merge && elv_rq_merge_ok(q->last_merge, bio)) {  in elv_merge()
     430  ret = blk_try_merge(q->last_merge, bio);  in elv_merge()
     443  __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);  in elv_merge()
          [all …]

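elv_merge() above only decides where a bio could merge (ELEVATOR_BACK_MERGE, ELEVATOR_FRONT_MERGE, or no merge); the caller, e.g. blk_queue_bio() in blk-core.c, performs the merge itself. A sketch of that consumer side; note bio_attempt_back/front_merge() are declared in the block layer's private blk.h, so this only compiles inside block/, and demo_try_merge() is a hypothetical name.

    #include <linux/elevator.h>
    #include "blk.h"                        /* bio_attempt_{back,front}_merge() */

    static bool demo_try_merge(struct request_queue *q, struct bio *bio)
    {
            struct request *rq;
            int el_ret = elv_merge(q, &rq, bio);

            if (el_ret == ELEVATOR_BACK_MERGE)
                    return bio_attempt_back_merge(q, rq, bio);
            if (el_ret == ELEVATOR_FRONT_MERGE)
                    return bio_attempt_front_merge(q, rq, bio);
            return false;                   /* ELEVATOR_NO_MERGE */
    }
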
D | bsg.c |
      84  struct bio *bio;  member
      85  struct bio *bidi_bio;
     292  blk_rq_unmap_user(next_rq->bio);  in bsg_map_hdr()
     309  bd->name, rq, bc, bc->bio, uptodate);  in bsg_rq_end_io()
     334  bc->bio = rq->bio;  in bsg_add_command()
     336  bc->bidi_bio = rq->next_rq->bio;  in bsg_add_command()
     394  struct bio *bio, struct bio *bidi_bio)  in blk_complete_sgv4_hdr_rq() argument
     398  dprintk("rq %p bio %p 0x%x\n", rq, bio, rq->errors);  in blk_complete_sgv4_hdr_rq()
     441  blk_rq_unmap_user(bio);  in blk_complete_sgv4_hdr_rq()
     501  tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,  in bsg_complete_all_commands()
          [all …]

D | blk-flush.c |
     126  rq->bio = rq->biotail;  in blk_flush_restore_request()
     413  BUG_ON(rq->bio != rq->biotail); /* assumes zero or single bio rq */  in blk_insert_flush()
     466  struct bio *bio;  in blkdev_issue_flush() local
     485  bio = bio_alloc(gfp_mask, 0);  in blkdev_issue_flush()
     486  bio->bi_bdev = bdev;  in blkdev_issue_flush()
     488  ret = submit_bio_wait(WRITE_FLUSH, bio);  in blkdev_issue_flush()
     496  *error_sector = bio->bi_iter.bi_sector;  in blkdev_issue_flush()
     498  bio_put(bio);  in blkdev_issue_flush()

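blkdev_issue_flush() above is the canonical empty-flush pattern: allocate a bio with zero data segments, point it at the device, and submit it synchronously. Reassembled as a sketch, keeping the ~v4.5 two-argument submit_bio_wait() this listing shows; demo_flush() is a hypothetical name.

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    static int demo_flush(struct block_device *bdev)
    {
            struct bio *bio;
            int ret;

            bio = bio_alloc(GFP_KERNEL, 0); /* zero data segments: a pure flush */
            bio->bi_bdev = bdev;

            ret = submit_bio_wait(WRITE_FLUSH, bio);
            bio_put(bio);
            return ret;
    }
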
D | deadline-iosched.c |
     125  deadline_merge(struct request_queue *q, struct request **req, struct bio *bio)  in deadline_merge() argument
     135  sector_t sector = bio_end_sector(bio);  in deadline_merge()
     137  __rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);  in deadline_merge()
     141  if (elv_rq_merge_ok(__rq, bio)) {  in deadline_merge()

D | scsi_ioctl.c |
     255  struct bio *bio)  in blk_complete_sghdr_rq() argument
     282  r = blk_rq_unmap_user(bio);  in blk_complete_sghdr_rq()
     298  struct bio *bio;  in sg_io() local
     359  bio = rq->bio;  in sg_io()
     375  ret = blk_complete_sghdr_rq(rq, hdr, bio);  in sg_io()

D | Makefile |
       5  obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \
      24  obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o t10-pi.o

D | cfq-iosched.c |
     863  struct cfq_io_cq *cic, struct bio *bio);
     899  static inline bool cfq_bio_sync(struct bio *bio)  in cfq_bio_sync() argument
     901  return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);  in cfq_bio_sync()
    2459  cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)  in cfq_find_rq_fmerge() argument
    2469  cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));  in cfq_find_rq_fmerge()
    2471  return elv_rb_find(&cfqq->sort_list, bio_end_sector(bio));  in cfq_find_rq_fmerge()
    2516  struct bio *bio)  in cfq_merge() argument
    2521  __rq = cfq_find_rq_fmerge(cfqd, bio);  in cfq_merge()
    2522  if (__rq && elv_rq_merge_ok(__rq, bio)) {  in cfq_merge()
    2541  struct bio *bio)  in cfq_bio_merged() argument
          [all …]

D | bsg-lib.c |
     132  if (req->bio) {  in bsg_create_job()
     137  if (rsp && rsp->bio) {  in bsg_create_job()

D | Kconfig |
      92  bool "Block layer bio throttling support"
      96  Block layer bio throttling support. It can be used to limit