Lines Matching refs:bio
72 struct bio *bio; member
82 void *dm_per_bio_data(struct bio *bio, size_t data_size) in dm_per_bio_data() argument
84 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); in dm_per_bio_data()
86 return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size; in dm_per_bio_data()
87 return (char *)bio - DM_IO_BIO_OFFSET - data_size; in dm_per_bio_data()
91 struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size) in dm_bio_from_per_bio_data()
95 return (struct bio *)((char *)io + DM_IO_BIO_OFFSET); in dm_bio_from_per_bio_data()
97 return (struct bio *)((char *)io + DM_TARGET_IO_BIO_OFFSET); in dm_bio_from_per_bio_data()
101 unsigned dm_bio_get_target_bio_nr(const struct bio *bio) in dm_bio_get_target_bio_nr() argument
103 return container_of(bio, struct dm_target_io, clone)->target_bio_nr; in dm_bio_get_target_bio_nr()
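The three accessors above (dm_per_bio_data, dm_bio_from_per_bio_data, dm_bio_get_target_bio_nr) are the exported per-bio-data interface for targets. A minimal sketch of how a target's ->map might use them; struct my_ctx, my_tgt_map and the ti->private layout are hypothetical and assume the constructor set ti->per_io_data_size = sizeof(struct my_ctx):

#include <linux/device-mapper.h>
#include <linux/jiffies.h>

struct my_ctx {					/* hypothetical per-bio payload */
	unsigned long start_jiffies;
	unsigned int bio_nr;
};

static int my_tgt_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_dev *dev = ti->private;	/* hypothetical: opened in ->ctr */
	/* space reserved by dm core because the ctr set per_io_data_size */
	struct my_ctx *ctx = dm_per_bio_data(bio, sizeof(struct my_ctx));

	ctx->start_jiffies = jiffies;
	/* which duplicate this is when num_flush_bios/num_discard_bios > 1 */
	ctx->bio_nr = dm_bio_get_target_bio_nr(bio);

	/*
	 * A completion path that only holds the per-bio data can recover the
	 * clone with dm_bio_from_per_bio_data(ctx, sizeof(struct my_ctx)).
	 */
	bio_set_dev(bio, dev->bdev);
	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
	return DM_MAPIO_REMAPPED;
}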
477 u64 dm_start_time_ns_from_clone(struct bio *bio) in dm_start_time_ns_from_clone() argument
479 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); in dm_start_time_ns_from_clone()
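dm_start_time_ns_from_clone() above exposes the original bio's accounting start time (jiffies-based in this file, converted to nanoseconds) to targets. A rough sketch of a hypothetical ->end_io hook using it to flag slow I/O; my_latency_end_io and MY_SLOW_NS are made up:

#define MY_SLOW_NS	(500 * NSEC_PER_MSEC)	/* hypothetical threshold */

static int my_latency_end_io(struct dm_target *ti, struct bio *bio,
			     blk_status_t *error)
{
	/* both values are jiffies resolution, expressed in nanoseconds */
	u64 age_ns = jiffies_to_nsecs(jiffies) - dm_start_time_ns_from_clone(bio);

	if (age_ns > MY_SLOW_NS)
		DMWARN_LIMIT("bio took %llu ns", age_ns);
	return DM_ENDIO_DONE;
}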
486 static bool bio_is_flush_with_data(struct bio *bio) in bio_is_flush_with_data() argument
488 return ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size); in bio_is_flush_with_data()
491 static void dm_io_acct(bool end, struct mapped_device *md, struct bio *bio, in dm_io_acct() argument
498 is_flush_with_data = bio_is_flush_with_data(bio); in dm_io_acct()
500 bi_size = bio->bi_iter.bi_size; in dm_io_acct()
501 bio->bi_iter.bi_size = 0; in dm_io_acct()
505 bio_start_io_acct_time(bio, start_time); in dm_io_acct()
507 bio_end_io_acct(bio, start_time); in dm_io_acct()
510 dm_stats_account_io(&md->stats, bio_data_dir(bio), in dm_io_acct()
511 bio->bi_iter.bi_sector, bio_sectors(bio), in dm_io_acct()
516 bio->bi_iter.bi_size = bi_size; in dm_io_acct()
524 static void end_io_acct(struct mapped_device *md, struct bio *bio, in end_io_acct() argument
527 dm_io_acct(true, md, bio, start_time, stats_aux); in end_io_acct()
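dm_io_acct()/end_io_acct() above wrap the generic bio accounting helpers. A minimal sketch of the underlying bio_start_io_acct()/bio_end_io_acct() pairing as a stacking driver might use it; struct my_io, my_start_io and my_endio are hypothetical:

#include <linux/blkdev.h>
#include <linux/slab.h>

struct my_io {				/* hypothetical per-request bookkeeping */
	struct bio *orig;
	unsigned long start_time;
};

static void my_endio(struct my_io *io, blk_status_t status)
{
	/* charge the whole lifetime of the original bio to the disk stats */
	bio_end_io_acct(io->orig, io->start_time);
	io->orig->bi_status = status;
	bio_endio(io->orig);
	kfree(io);
}

static struct my_io *my_start_io(struct bio *bio)
{
	struct my_io *io = kmalloc(sizeof(*io), GFP_NOIO);

	if (!io)
		return NULL;
	io->orig = bio;
	io->start_time = bio_start_io_acct(bio);
	return io;
}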
530 static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) in alloc_io() argument
534 struct bio *clone; in alloc_io()
549 io->orig_bio = bio; in alloc_io()
574 struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs); in alloc_tio()
600 static void queue_io(struct mapped_device *md, struct bio *bio) in queue_io() argument
605 bio_list_add(&md->deferred, bio); in queue_io()
807 struct bio *bio; in dm_io_dec_pending() local
821 bio = io->orig_bio; in dm_io_dec_pending()
828 !WARN_ON_ONCE(dm_is_zone_write(md, bio))) { in dm_io_dec_pending()
830 bio_list_add_head(&md->deferred, bio); in dm_io_dec_pending()
845 end_io_acct(md, bio, start_time, &stats_aux); in dm_io_dec_pending()
856 if (bio_is_flush_with_data(bio)) { in dm_io_dec_pending()
861 bio->bi_opf &= ~REQ_PREFLUSH; in dm_io_dec_pending()
862 queue_io(md, bio); in dm_io_dec_pending()
866 bio->bi_status = io_error; in dm_io_dec_pending()
867 bio_endio(bio); in dm_io_dec_pending()
897 static bool swap_bios_limit(struct dm_target *ti, struct bio *bio) in swap_bios_limit() argument
899 return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios); in swap_bios_limit()
902 static void clone_endio(struct bio *bio) in clone_endio() argument
904 blk_status_t error = bio->bi_status; in clone_endio()
905 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); in clone_endio()
909 struct request_queue *q = bio->bi_bdev->bd_disk->queue; in clone_endio()
912 if (bio_op(bio) == REQ_OP_DISCARD && in clone_endio()
915 else if (bio_op(bio) == REQ_OP_WRITE_SAME && in clone_endio()
918 else if (bio_op(bio) == REQ_OP_WRITE_ZEROES && in clone_endio()
924 dm_zone_endio(io, bio); in clone_endio()
927 int r = endio(tio->ti, bio, &error); in clone_endio()
935 if (WARN_ON_ONCE(dm_is_zone_write(md, bio))) in clone_endio()
951 if (unlikely(swap_bios_limit(tio->ti, bio))) { in clone_endio()
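clone_endio() above is where a target's optional ->end_io hook is called (line 927) and its DM_ENDIO_* return value interpreted. A hedged sketch of such a hook; my_retry_end_io and its retry/masking policy are illustrative only:

static int my_retry_end_io(struct dm_target *ti, struct bio *bio,
			   blk_status_t *error)
{
	/* transient resource errors: ask dm core to requeue the clone */
	if (*error == BLK_STS_RESOURCE || *error == BLK_STS_AGAIN)
		return DM_ENDIO_REQUEUE;

	/* mask an expected error so the original bio completes cleanly */
	if (*error == BLK_STS_NOTSUPP && bio_op(bio) == REQ_OP_DISCARD)
		*error = BLK_STS_OK;

	return DM_ENDIO_DONE;		/* normal completion path */
}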
1177 void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors) in dm_accept_partial_bio() argument
1179 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); in dm_accept_partial_bio()
1180 unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT; in dm_accept_partial_bio()
1182 BUG_ON(bio->bi_opf & REQ_PREFLUSH); in dm_accept_partial_bio()
1183 BUG_ON(op_is_zone_mgmt(bio_op(bio))); in dm_accept_partial_bio()
1184 BUG_ON(bio_op(bio) == REQ_OP_ZONE_APPEND); in dm_accept_partial_bio()
1189 bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT; in dm_accept_partial_bio()
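dm_accept_partial_bio() above lets a ->map hook take only the first n_sectors of a bio and have dm core resubmit the remainder; the BUG_ONs show it must not be used on flush, zone-management or zone-append bios. A minimal hypothetical map using it (my_bounded_map, MY_MAX_IO_SECTORS and the ti->private layout are assumptions):

#define MY_MAX_IO_SECTORS	128		/* hypothetical per-io limit */

static int my_bounded_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_dev *dev = ti->private;	/* hypothetical: opened in ->ctr */

	/* handle only the first chunk; dm core re-issues the rest as a new bio */
	if (bio_sectors(bio) > MY_MAX_IO_SECTORS)
		dm_accept_partial_bio(bio, MY_MAX_IO_SECTORS);

	bio_set_dev(bio, dev->bdev);
	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
	return DM_MAPIO_REMAPPED;
}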
1213 struct bio *clone = &tio->clone; in __map_bio()
1278 static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len) in bio_setup_sector() argument
1280 bio->bi_iter.bi_sector = sector; in bio_setup_sector()
1281 bio->bi_iter.bi_size = to_bytes(len); in bio_setup_sector()
1287 static int clone_bio(struct dm_target_io *tio, struct bio *bio, in clone_bio() argument
1290 struct bio *clone = &tio->clone; in clone_bio()
1293 __bio_clone_fast(clone, bio); in clone_bio()
1294 trace_android_vh_dm_update_clone_bio(clone, bio); in clone_bio()
1295 r = bio_crypt_clone(clone, bio, GFP_NOIO); in clone_bio()
1299 if (bio_integrity(bio)) { in clone_bio()
1308 r = bio_integrity_clone(clone, bio, GFP_NOIO); in clone_bio()
1316 if (bio_integrity(bio)) in clone_bio()
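clone_bio() above builds a data clone with __bio_clone_fast() plus inline-crypto and integrity cloning. A rough standalone sketch of that combination; my_clone_bio and its error policy are hypothetical, and the bio_set is assumed to have been set up with bioset_init():

static struct bio *my_clone_bio(struct bio *src, struct bio_set *bs)
{
	struct bio *clone = bio_alloc_bioset(GFP_NOIO, 0, bs);

	if (!clone)
		return NULL;
	__bio_clone_fast(clone, src);	/* share src's bvecs, copy op, flags, iter */

	if (bio_crypt_clone(clone, src, GFP_NOIO) < 0)
		goto bad;
	if (bio_integrity(src) &&
	    bio_integrity_clone(clone, src, GFP_NOIO) < 0)
		goto bad;
	return clone;

bad:
	bio_put(clone);
	return NULL;
}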
1339 struct bio *bio; in alloc_multiple_bios() local
1355 while ((bio = bio_list_pop(blist))) { in alloc_multiple_bios()
1356 tio = container_of(bio, struct dm_target_io, clone); in alloc_multiple_bios()
1365 struct bio *clone = &tio->clone; in __clone_and_map_simple_bio()
1369 __bio_clone_fast(clone, ci->bio); in __clone_and_map_simple_bio()
1380 struct bio *bio; in __send_duplicate_bios() local
1385 while ((bio = bio_list_pop(&blist))) { in __send_duplicate_bios()
1386 tio = container_of(bio, struct dm_target_io, clone); in __send_duplicate_bios()
1395 struct bio flush_bio; in __send_empty_flush()
1406 ci->bio = &flush_bio; in __send_empty_flush()
1409 BUG_ON(bio_has_data(ci->bio)); in __send_empty_flush()
1413 bio_uninit(ci->bio); in __send_empty_flush()
1420 struct bio *bio = ci->bio; in __clone_and_map_data_bio() local
1426 r = clone_bio(tio, bio, sector, *len); in __clone_and_map_data_bio()
1461 static bool is_abnormal_io(struct bio *bio) in is_abnormal_io() argument
1465 switch (bio_op(bio)) { in is_abnormal_io()
1480 struct bio *bio = ci->bio; in __process_abnormal_io() local
1483 switch (bio_op(bio)) { in __process_abnormal_io()
1533 struct dm_table *map, struct bio *bio) in init_clone_info() argument
1536 ci->io = alloc_io(md, bio); in init_clone_info()
1537 ci->sector = bio->bi_iter.bi_sector; in init_clone_info()
1544 struct dm_table *map, struct bio *bio) in __split_and_process_bio() argument
1550 init_clone_info(&ci, md, map, bio); in __split_and_process_bio()
1552 if (bio->bi_opf & REQ_PREFLUSH) { in __split_and_process_bio()
1555 } else if (op_is_zone_mgmt(bio_op(bio))) { in __split_and_process_bio()
1556 ci.bio = bio; in __split_and_process_bio()
1560 ci.bio = bio; in __split_and_process_bio()
1561 ci.sector_count = bio_sectors(bio); in __split_and_process_bio()
1572 struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count, in __split_and_process_bio()
1576 bio_chain(b, bio); in __split_and_process_bio()
1577 trace_block_split(b, bio->bi_iter.bi_sector); in __split_and_process_bio()
1578 ret = submit_bio_noacct(bio); in __split_and_process_bio()
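__split_and_process_bio() above uses the standard bio_split()/bio_chain()/submit_bio_noacct() pattern to peel off the portion it has processed and push the remainder back to the block layer. A generic hedged sketch of that pattern in a ->submit_bio style handler; my_submit_bio, MY_MAX_SECTORS and my_split_set are assumptions, and the bio_set must be initialised with bioset_init():

#define MY_MAX_SECTORS	256			/* hypothetical per-io limit */

static struct bio_set my_split_set;		/* assumed: bioset_init() at load time */

static blk_qc_t my_submit_bio(struct bio *bio)
{
	if (bio_sectors(bio) > MY_MAX_SECTORS) {
		/* carve the first MY_MAX_SECTORS off the front; 'bio' keeps the rest */
		struct bio *front = bio_split(bio, MY_MAX_SECTORS, GFP_NOIO,
					      &my_split_set);

		bio_chain(front, bio);		/* parent 'bio' completes after 'front' */
		submit_bio_noacct(bio);		/* remainder re-enters the block layer */
		bio = front;			/* only the front piece is handled here */
	}

	/* ... map and issue 'bio' ... */
	return BLK_QC_T_NONE;
}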
1588 static blk_qc_t dm_submit_bio(struct bio *bio) in dm_submit_bio() argument
1590 struct mapped_device *md = bio->bi_bdev->bd_disk->private_data; in dm_submit_bio()
1600 if (bio->bi_opf & REQ_NOWAIT) in dm_submit_bio()
1601 bio_wouldblock_error(bio); in dm_submit_bio()
1602 else if (bio->bi_opf & REQ_RAHEAD) in dm_submit_bio()
1603 bio_io_error(bio); in dm_submit_bio()
1605 queue_io(md, bio); in dm_submit_bio()
1613 if (is_abnormal_io(bio)) in dm_submit_bio()
1614 blk_queue_split(&bio); in dm_submit_bio()
1616 ret = __split_and_process_bio(md, map, bio); in dm_submit_bio()
2299 struct bio *bio; in dm_wq_work() local
2303 bio = bio_list_pop(&md->deferred); in dm_wq_work()
2306 if (!bio) in dm_wq_work()
2309 submit_bio_noacct(bio); in dm_wq_work()
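queue_io() (line 605) and dm_wq_work() above are the two halves of a deferred-bio list: producers add under a lock, a worker pops and resubmits. A minimal hedged sketch of the same idiom outside dm; all my_* names are hypothetical, and the workqueue and work item are assumed to be created and INIT_WORK()-ed elsewhere:

static DEFINE_SPINLOCK(my_deferred_lock);
static struct bio_list my_deferred = BIO_EMPTY_LIST;
static struct workqueue_struct *my_wq;		/* assumed: alloc_workqueue() at init */
static struct work_struct my_deferred_work;	/* assumed: INIT_WORK(..., my_deferred_fn) */

static void my_queue_bio(struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&my_deferred_lock, flags);
	bio_list_add(&my_deferred, bio);
	spin_unlock_irqrestore(&my_deferred_lock, flags);

	queue_work(my_wq, &my_deferred_work);
}

static void my_deferred_fn(struct work_struct *work)
{
	struct bio *bio;

	for (;;) {
		spin_lock_irq(&my_deferred_lock);
		bio = bio_list_pop(&my_deferred);
		spin_unlock_irq(&my_deferred_lock);

		if (!bio)
			break;
		submit_bio_noacct(bio);		/* hand the deferred bio back to the stack */
	}
}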