
Lines Matching +full:suspend +full:- +full:to +full:- +full:disk

3  * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
8 #include "dm-core.h"
9 #include "dm-rq.h"
10 #include "dm-uevent.h"
30 #include <linux/blk-crypto.h>
66 * One of these is allocated (on-stack) per original bio.
111 if (!tio->inside_dm_io) in dm_per_bio_data()
112 return (char *)bio - offsetof(struct dm_target_io, clone) - data_size; in dm_per_bio_data()
113 return (char *)bio - offsetof(struct dm_target_io, clone) - offsetof(struct dm_io, tio) - data_size; in dm_per_bio_data()

120 if (io->magic == DM_IO_MAGIC) in dm_bio_from_per_bio_data()
122 BUG_ON(io->magic != DM_TIO_MAGIC); in dm_bio_from_per_bio_data()
129 return container_of(bio, struct dm_target_io, clone)->target_bio_nr; in dm_bio_get_target_bio_nr()
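
The three accessors above are the target-facing API for the per-bio scratch space DM reserves in front of each clone. A minimal usage sketch follows, assuming a hypothetical target; the example_* names are invented, and only dm_per_bio_data() and ti->per_io_data_size are the real interface:

    #include <linux/device-mapper.h>

    /* Illustrative per-bio payload for a made-up target. */
    struct example_per_bio_data {
            sector_t orig_sector;
    };

    static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
    {
            /* ask DM to reserve this much space in front of every clone bio */
            ti->per_io_data_size = sizeof(struct example_per_bio_data);
            return 0;
    }

    static int example_map(struct dm_target *ti, struct bio *bio)
    {
            struct example_per_bio_data *pb =
                    dm_per_bio_data(bio, sizeof(struct example_per_bio_data));

            pb->orig_sector = bio->bi_iter.bi_sector;  /* stash for ->end_io */
            /* ... remap the bio to the underlying device here ... */
            return DM_MAPIO_REMAPPED;
    }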
133 #define MINOR_ALLOCED ((void *)-1)
136 * Bits for the md->flags field.
162 * For mempools pre-allocation at the table loading time.
176 * Bio-based DM's mempools' reserved IOs set by the user.
231 DM_NUMA_NODE, num_online_nodes() - 1); in dm_get_numa_node()
244 r = -ENOMEM; in local_init()
316 while (i--) in dm_init()
326 while (i--) in dm_exit()
340 return test_bit(DMF_DELETING, &md->flags); in dm_deleting_md()
349 md = bdev->bd_disk->private_data; in dm_blk_open()
353 if (test_bit(DMF_FREEING, &md->flags) || in dm_blk_open()
360 atomic_inc(&md->open_count); in dm_blk_open()
364 return md ? 0 : -ENXIO; in dm_blk_open()
367 static void dm_blk_close(struct gendisk *disk, fmode_t mode) in dm_blk_close() argument
373 md = disk->private_data; in dm_blk_close()
377 if (atomic_dec_and_test(&md->open_count) && in dm_blk_close()
378 (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) in dm_blk_close()
388 return atomic_read(&md->open_count); in dm_open_count()
401 r = -EBUSY; in dm_lock_for_deletion()
403 set_bit(DMF_DEFERRED_REMOVE, &md->flags); in dm_lock_for_deletion()
404 } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags)) in dm_lock_for_deletion()
405 r = -EEXIST; in dm_lock_for_deletion()
407 set_bit(DMF_DELETING, &md->flags); in dm_lock_for_deletion()
420 if (test_bit(DMF_DELETING, &md->flags)) in dm_cancel_deferred_remove()
421 r = -EBUSY; in dm_cancel_deferred_remove()
423 clear_bit(DMF_DEFERRED_REMOVE, &md->flags); in dm_cancel_deferred_remove()
437 struct mapped_device *md = bdev->bd_disk->private_data; in dm_blk_getgeo()
446 sector_t sector_diff = args->tgt->begin - args->start; in dm_report_zones_cb()
451 if (zone->start >= args->start + args->tgt->len) in dm_report_zones_cb()
456 * to match its position in the target range. in dm_report_zones_cb()
458 zone->start += sector_diff; in dm_report_zones_cb()
459 if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) { in dm_report_zones_cb()
460 if (zone->cond == BLK_ZONE_COND_FULL) in dm_report_zones_cb()
461 zone->wp = zone->start + zone->len; in dm_report_zones_cb()
462 else if (zone->cond == BLK_ZONE_COND_EMPTY) in dm_report_zones_cb()
463 zone->wp = zone->start; in dm_report_zones_cb()
465 zone->wp += sector_diff; in dm_report_zones_cb()
468 args->next_sector = zone->start + zone->len; in dm_report_zones_cb()
469 return args->orig_cb(zone, args->zone_idx++, args->orig_data); in dm_report_zones_cb()
473 static int dm_blk_report_zones(struct gendisk *disk, sector_t sector, in dm_blk_report_zones() argument
476 struct mapped_device *md = disk->private_data; in dm_blk_report_zones()
486 return -EAGAIN; in dm_blk_report_zones()
490 ret = -EIO; in dm_blk_report_zones()
498 if (WARN_ON_ONCE(!tgt->type->report_zones)) { in dm_blk_report_zones()
499 ret = -EIO; in dm_blk_report_zones()
504 ret = tgt->type->report_zones(tgt, &args, in dm_blk_report_zones()
505 nr_zones - args.zone_idx); in dm_blk_report_zones()
509 args.next_sector < get_capacity(disk)); in dm_blk_report_zones()
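
To make the zone remapping above concrete, here is a worked example with made-up numbers: a target beginning at DM sector 1024 whose data starts at sector 4096 of the backing device, so args->tgt->begin = 1024 and args->start = 4096.

    #include <linux/types.h>

    /* Hypothetical numbers; mirrors the zone->start += sector_diff step above. */
    static sector_t example_zone_remap(sector_t zone_start_on_dev)
    {
            sector_t tgt_begin = 1024, dev_start = 4096;
            sector_t sector_diff = tgt_begin - dev_start;  /* wraps, but the sum is exact */

            /* e.g. a zone reported at device sector 4352 lands at DM sector 1280 */
            return zone_start_on_dev + sector_diff;
    }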
528 r = -ENOTTY; in dm_prepare_ioctl()
538 if (!tgt->type->prepare_ioctl) in dm_prepare_ioctl()
542 return -EAGAIN; in dm_prepare_ioctl()
544 r = tgt->type->prepare_ioctl(tgt, bdev); in dm_prepare_ioctl()
545 if (r == -ENOTCONN && !fatal_signal_pending(current)) { in dm_prepare_ioctl()
562 struct mapped_device *md = bdev->bd_disk->private_data; in dm_blk_ioctl()
576 "%s: sending ioctl %x to DM device without required privilege.", in dm_blk_ioctl()
577 current->comm, cmd); in dm_blk_ioctl()
578 r = -ENOIOCTLCMD; in dm_blk_ioctl()
592 struct dm_io *io = tio->io; in dm_start_time_ns_from_clone()
594 return jiffies_to_nsecs(io->start_time); in dm_start_time_ns_from_clone()
600 struct mapped_device *md = io->md; in start_io_acct()
601 struct bio *bio = io->orig_bio; in start_io_acct()
603 io->start_time = bio_start_io_acct(bio); in start_io_acct()
604 if (unlikely(dm_stats_used(&md->stats))) in start_io_acct()
605 dm_stats_account_io(&md->stats, bio_data_dir(bio), in start_io_acct()
606 bio->bi_iter.bi_sector, bio_sectors(bio), in start_io_acct()
607 false, 0, &io->stats_aux); in start_io_acct()
612 struct mapped_device *md = io->md; in end_io_acct()
613 struct bio *bio = io->orig_bio; in end_io_acct()
614 unsigned long duration = jiffies - io->start_time; in end_io_acct()
616 bio_end_io_acct(bio, io->start_time); in end_io_acct()
618 if (unlikely(dm_stats_used(&md->stats))) in end_io_acct()
619 dm_stats_account_io(&md->stats, bio_data_dir(bio), in end_io_acct()
620 bio->bi_iter.bi_sector, bio_sectors(bio), in end_io_acct()
621 true, duration, &io->stats_aux); in end_io_acct()
623 /* nudge anyone waiting on suspend queue */ in end_io_acct()
624 if (unlikely(wq_has_sleeper(&md->wait))) in end_io_acct()
625 wake_up(&md->wait); in end_io_acct()
634 clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs); in alloc_io()
639 tio->inside_dm_io = true; in alloc_io()
640 tio->io = NULL; in alloc_io()
643 io->magic = DM_IO_MAGIC; in alloc_io()
644 io->status = 0; in alloc_io()
645 atomic_set(&io->io_count, 1); in alloc_io()
646 io->orig_bio = bio; in alloc_io()
647 io->md = md; in alloc_io()
648 spin_lock_init(&io->endio_lock); in alloc_io()
657 bio_put(&io->tio.clone); in free_io()
665 if (!ci->io->tio.io) { in alloc_tio()
666 /* the dm_target_io embedded in ci->io is available */ in alloc_tio()
667 tio = &ci->io->tio; in alloc_tio()
669 struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs); in alloc_tio()
674 tio->inside_dm_io = false; in alloc_tio()
677 tio->magic = DM_TIO_MAGIC; in alloc_tio()
678 tio->io = ci->io; in alloc_tio()
679 tio->ti = ti; in alloc_tio()
680 tio->target_bio_nr = target_bio_nr; in alloc_tio()
687 if (tio->inside_dm_io) in free_tio()
689 bio_put(&tio->clone); in free_tio()
693 * Add the bio to the list of deferred io.
699 spin_lock_irqsave(&md->deferred_lock, flags); in queue_io()
700 bio_list_add(&md->deferred, bio); in queue_io()
701 spin_unlock_irqrestore(&md->deferred_lock, flags); in queue_io()
702 queue_work(md->wq, &md->work); in queue_io()
707 * function to access the md->map field, and make sure they call
710 struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier) in dm_get_live_table()
712 *srcu_idx = srcu_read_lock(&md->io_barrier); in dm_get_live_table()
714 return srcu_dereference(md->map, &md->io_barrier); in dm_get_live_table()
717 void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier) in dm_put_live_table()
719 srcu_read_unlock(&md->io_barrier, srcu_idx); in dm_put_live_table()
724 synchronize_srcu(&md->io_barrier); in dm_sync_table()
729 * A fast alternative to dm_get_live_table/dm_put_live_table.
735 return rcu_dereference(md->map); in dm_get_live_table_fast()
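
For reference, callers pair these accessors as follows; this is a sketch only, and the example_inspect_table() wrapper is not a real dm.c function:

    #include <linux/device-mapper.h>

    static void example_inspect_table(struct mapped_device *md)
    {
            int srcu_idx;
            struct dm_table *map = dm_get_live_table(md, &srcu_idx);

            if (map) {
                    /* ... read-only use of the live table ... */
            }
            /* always pair the put, even when map is NULL */
            dm_put_live_table(md, srcu_idx);
    }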
743 static char *_dm_claim_ptr = "I belong to device-mapper";
755 BUG_ON(td->dm_dev.bdev); in open_table_device()
757 bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _dm_claim_ptr); in open_table_device()
763 blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL); in open_table_device()
767 td->dm_dev.bdev = bdev; in open_table_device()
768 td->dm_dev.dax_dev = dax_get_by_host(bdev->bd_disk->disk_name); in open_table_device()
777 if (!td->dm_dev.bdev) in close_table_device()
780 bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md)); in close_table_device()
781 blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL); in close_table_device()
782 put_dax(td->dm_dev.dax_dev); in close_table_device()
783 td->dm_dev.bdev = NULL; in close_table_device()
784 td->dm_dev.dax_dev = NULL; in close_table_device()
793 if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode) in find_table_device()
805 mutex_lock(&md->table_devices_lock); in dm_get_table_device()
806 td = find_table_device(&md->table_devices, dev, mode); in dm_get_table_device()
808 td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id); in dm_get_table_device()
810 mutex_unlock(&md->table_devices_lock); in dm_get_table_device()
811 return -ENOMEM; in dm_get_table_device()
814 td->dm_dev.mode = mode; in dm_get_table_device()
815 td->dm_dev.bdev = NULL; in dm_get_table_device()
818 mutex_unlock(&md->table_devices_lock); in dm_get_table_device()
823 format_dev_t(td->dm_dev.name, dev); in dm_get_table_device()
825 refcount_set(&td->count, 1); in dm_get_table_device()
826 list_add(&td->list, &md->table_devices); in dm_get_table_device()
828 refcount_inc(&td->count); in dm_get_table_device()
830 mutex_unlock(&md->table_devices_lock); in dm_get_table_device()
832 *result = &td->dm_dev; in dm_get_table_device()
841 mutex_lock(&md->table_devices_lock); in dm_put_table_device()
842 if (refcount_dec_and_test(&td->count)) { in dm_put_table_device()
844 list_del(&td->list); in dm_put_table_device()
847 mutex_unlock(&md->table_devices_lock); in dm_put_table_device()
859 td->dm_dev.name, refcount_read(&td->count)); in free_table_devices()
869 *geo = md->geometry; in dm_get_geometry()
879 sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors; in dm_set_geometry()
881 if (geo->start > sz) { in dm_set_geometry()
883 return -EINVAL; in dm_set_geometry()
886 md->geometry = *geo; in dm_set_geometry()
893 return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); in __noflush_suspending()
905 struct mapped_device *md = io->md; in dec_pending()
907 /* Push-back supersedes any I/O errors */ in dec_pending()
909 spin_lock_irqsave(&io->endio_lock, flags); in dec_pending()
910 if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md))) in dec_pending()
911 io->status = error; in dec_pending()
912 spin_unlock_irqrestore(&io->endio_lock, flags); in dec_pending()
915 if (atomic_dec_and_test(&io->io_count)) { in dec_pending()
916 if (io->status == BLK_STS_DM_REQUEUE) { in dec_pending()
920 spin_lock_irqsave(&md->deferred_lock, flags); in dec_pending()
922 /* NOTE early return due to BLK_STS_DM_REQUEUE below */ in dec_pending()
923 bio_list_add_head(&md->deferred, io->orig_bio); in dec_pending()
925 /* noflush suspend was interrupted. */ in dec_pending()
926 io->status = BLK_STS_IOERR; in dec_pending()
927 spin_unlock_irqrestore(&md->deferred_lock, flags); in dec_pending()
930 io_error = io->status; in dec_pending()
931 bio = io->orig_bio; in dec_pending()
938 if ((bio->bi_opf & REQ_PREFLUSH) && bio->bi_iter.bi_size) { in dec_pending()
943 bio->bi_opf &= ~REQ_PREFLUSH; in dec_pending()
948 bio->bi_status = io_error; in dec_pending()
959 limits->max_discard_sectors = 0; in disable_discard()
960 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue); in disable_discard()
968 limits->max_write_same_sectors = 0; in disable_write_same()
976 limits->max_write_zeroes_sectors = 0; in disable_write_zeroes()
981 return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios); in swap_bios_limit()
986 blk_status_t error = bio->bi_status; in clone_endio()
988 struct dm_io *io = tio->io; in clone_endio()
989 struct mapped_device *md = tio->io->md; in clone_endio()
990 dm_endio_fn endio = tio->ti->type->end_io; in clone_endio()
991 struct bio *orig_bio = io->orig_bio; in clone_endio()
995 !bio->bi_disk->queue->limits.max_discard_sectors) in clone_endio()
998 !bio->bi_disk->queue->limits.max_write_same_sectors) in clone_endio()
1001 !bio->bi_disk->queue->limits.max_write_zeroes_sectors) in clone_endio()
1006 * For zone-append bios get offset in zone of the written in clone_endio()
1007 * sector and add that to the original bio sector pos. in clone_endio()
1010 sector_t written_sector = bio->bi_iter.bi_sector; in clone_endio()
1011 struct request_queue *q = orig_bio->bi_disk->queue; in clone_endio()
1012 u64 mask = (u64)blk_queue_zone_sectors(q) - 1; in clone_endio()
1014 orig_bio->bi_iter.bi_sector += written_sector & mask; in clone_endio()
1018 int r = endio(tio->ti, bio, &error); in clone_endio()
1034 if (unlikely(swap_bios_limit(tio->ti, bio))) { in clone_endio()
1035 struct mapped_device *md = io->md; in clone_endio()
1036 up(&md->swap_bios_semaphore); in clone_endio()
1044 * Return maximum size of I/O possible at the supplied sector up to the current
1050 return ti->len - target_offset; in max_io_len_target_boundary()
1060 * Does the target need to split IO even further? in max_io_len()
1061 * - varied (per target) IO splitting is a tenet of DM; this in max_io_len()
1064 * ti->max_io_len to override stacked chunk_sectors. in max_io_len()
1066 if (ti->max_io_len) { in max_io_len()
1067 max_len = blk_max_size_offset(ti->table->md->queue, in max_io_len()
1068 target_offset, ti->max_io_len); in max_io_len()
1081 ti->error = "Maximum size of target IO is too large"; in dm_set_target_max_io_len()
1082 return -EINVAL; in dm_set_target_max_io_len()
1085 ti->max_io_len = (uint32_t) len; in dm_set_target_max_io_len()
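
dm_set_target_max_io_len() is how a target opts into the extra splitting checked above; a hypothetical constructor might cap single-bio size like this (sketch, not taken from dm.c):

    #include <linux/device-mapper.h>

    /* Made-up constructor capping any single bio at 1 MiB (2048 sectors). */
    static int example_split_ctr(struct dm_target *ti, unsigned argc, char **argv)
    {
            int r = dm_set_target_max_io_len(ti, 2048);

            if (r)
                    return r;  /* ti->error already set by the helper */

            ti->num_flush_bios = 1;
            return 0;
    }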
1093 __acquires(md->io_barrier) in dm_dax_get_live_target()
1115 long len, ret = -EIO; in dm_dax_direct_access()
1122 if (!ti->type->direct_access) in dm_dax_direct_access()
1128 ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn); in dm_dax_direct_access()
1169 if (!ti->type->dax_copy_from_iter) { in dm_dax_copy_from_iter()
1173 ret = ti->type->dax_copy_from_iter(ti, pgoff, addr, bytes, i); in dm_dax_copy_from_iter()
1193 if (!ti->type->dax_copy_to_iter) { in dm_dax_copy_to_iter()
1197 ret = ti->type->dax_copy_to_iter(ti, pgoff, addr, bytes, i); in dm_dax_copy_to_iter()
1210 int ret = -EIO; in dm_dax_zero_page_range()
1217 if (WARN_ON(!ti->type->dax_zero_page_range)) { in dm_dax_zero_page_range()
1219 * ->zero_page_range() is a mandatory dax operation. If we are in dm_dax_zero_page_range()
1224 ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages); in dm_dax_zero_page_range()
1236 * dm_accept_partial_bio informs the dm that the target only wants to process
1241 * +--------------------+---------------+-------+
1243 * +--------------------+---------------+-------+
1245 * <-------------- *tio->len_ptr --------------->
1246 * <------- bi_size ------->
1247 * <-- n_sectors -->
1251 * Region 2 is the remaining bio size that the target wants to process.
1252 * (it may be empty if region 1 is non-empty, although there is no reason
1253 * to make it empty)
1254 * The target requires that region 3 is to be sent in the next bio.
1256 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
1263 unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT; in dm_accept_partial_bio()
1265 BUG_ON(bio->bi_opf & REQ_PREFLUSH); in dm_accept_partial_bio()
1268 BUG_ON(bi_size > *tio->len_ptr); in dm_accept_partial_bio()
1271 *tio->len_ptr -= bi_size - n_sectors; in dm_accept_partial_bio()
1272 bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT; in dm_accept_partial_bio()
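
A hedged sketch of how a target's ->map method might use dm_accept_partial_bio() to keep only regions 1+2 and let DM resubmit region 3; the example_partial_map() function and the 8-sector limit are invented for illustration:

    #include <linux/device-mapper.h>

    static int example_partial_map(struct dm_target *ti, struct bio *bio)
    {
            struct dm_dev *dev = ti->private;  /* underlying device, opened in the ctr */
            unsigned n_sectors = 8;            /* illustrative per-bio limit */

            /* never on flush bios, matching the BUG_ON above */
            if (!(bio->bi_opf & REQ_PREFLUSH) && bio_sectors(bio) > n_sectors)
                    dm_accept_partial_bio(bio, n_sectors);

            bio_set_dev(bio, dev->bdev);
            /* (a real target would also adjust bio->bi_iter.bi_sector here) */
            return DM_MAPIO_REMAPPED;
    }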
1278 mutex_lock(&md->swap_bios_lock); in __set_swap_bios_limit()
1279 while (latch < md->swap_bios) { in __set_swap_bios_limit()
1281 down(&md->swap_bios_semaphore); in __set_swap_bios_limit()
1282 md->swap_bios--; in __set_swap_bios_limit()
1284 while (latch > md->swap_bios) { in __set_swap_bios_limit()
1286 up(&md->swap_bios_semaphore); in __set_swap_bios_limit()
1287 md->swap_bios++; in __set_swap_bios_limit()
1289 mutex_unlock(&md->swap_bios_lock); in __set_swap_bios_limit()
1296 struct bio *clone = &tio->clone; in __map_bio()
1297 struct dm_io *io = tio->io; in __map_bio()
1298 struct dm_target *ti = tio->ti; in __map_bio()
1301 clone->bi_end_io = clone_endio; in __map_bio()
1304 * Map the clone. If r == 0 we don't need to do in __map_bio()
1308 atomic_inc(&io->io_count); in __map_bio()
1309 sector = clone->bi_iter.bi_sector; in __map_bio()
1312 struct mapped_device *md = io->md; in __map_bio()
1314 if (unlikely(latch != md->swap_bios)) in __map_bio()
1316 down(&md->swap_bios_semaphore); in __map_bio()
1319 r = ti->type->map(ti, clone); in __map_bio()
1325 trace_block_bio_remap(clone->bi_disk->queue, clone, in __map_bio()
1326 bio_dev(io->orig_bio), sector); in __map_bio()
1331 struct mapped_device *md = io->md; in __map_bio()
1332 up(&md->swap_bios_semaphore); in __map_bio()
1339 struct mapped_device *md = io->md; in __map_bio()
1340 up(&md->swap_bios_semaphore); in __map_bio()
1355 bio->bi_iter.bi_sector = sector; in bio_setup_sector()
1356 bio->bi_iter.bi_size = to_bytes(len); in bio_setup_sector()
1365 struct bio *clone = &tio->clone; in clone_bio()
1375 if (unlikely(!dm_target_has_integrity(tio->ti->type) && in clone_bio()
1376 !dm_target_passes_integrity(tio->ti->type))) { in clone_bio()
1378 dm_device_name(tio->io->md), in clone_bio()
1379 tio->ti->type->name); in clone_bio()
1380 return -EIO; in clone_bio()
1388 bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); in clone_bio()
1389 clone->bi_iter.bi_size = to_bytes(len); in clone_bio()
1408 bio_list_add(blist, &tio->clone); in alloc_multiple_bios()
1417 mutex_lock(&ci->io->md->table_devices_lock); in alloc_multiple_bios()
1423 bio_list_add(blist, &tio->clone); in alloc_multiple_bios()
1426 mutex_unlock(&ci->io->md->table_devices_lock); in alloc_multiple_bios()
1440 struct bio *clone = &tio->clone; in __clone_and_map_simple_bio()
1442 tio->len_ptr = len; in __clone_and_map_simple_bio()
1444 __bio_clone_fast(clone, ci->bio); in __clone_and_map_simple_bio()
1446 bio_setup_sector(clone, ci->sector, *len); in __clone_and_map_simple_bio()
1473 * Use an on-stack bio for this, it's safe since we don't in __send_empty_flush()
1474 * need to reference it after submit. It's just used as in __send_empty_flush()
1479 ci->bio = &flush_bio; in __send_empty_flush()
1480 ci->sector_count = 0; in __send_empty_flush()
1489 bio_set_dev(ci->bio, ci->io->md->bdev); in __send_empty_flush()
1491 BUG_ON(bio_has_data(ci->bio)); in __send_empty_flush()
1492 while ((ti = dm_table_get_target(ci->map, target_nr++))) in __send_empty_flush()
1493 __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL); in __send_empty_flush()
1495 bio_uninit(ci->bio); in __send_empty_flush()
1502 struct bio *bio = ci->bio; in __clone_and_map_data_bio()
1507 tio->len_ptr = len; in __clone_and_map_data_bio()
1530 return -EOPNOTSUPP; in __send_changing_extent_only()
1532 len = min_t(sector_t, ci->sector_count, in __send_changing_extent_only()
1533 max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector))); in __send_changing_extent_only()
1537 ci->sector += len; in __send_changing_extent_only()
1538 ci->sector_count -= len; in __send_changing_extent_only()
1562 struct bio *bio = ci->bio; in __process_abnormal_io()
1567 num_bios = ti->num_discard_bios; in __process_abnormal_io()
1570 num_bios = ti->num_secure_erase_bios; in __process_abnormal_io()
1573 num_bios = ti->num_write_same_bios; in __process_abnormal_io()
1576 num_bios = ti->num_write_zeroes_bios; in __process_abnormal_io()
1587 * Select the correct strategy for processing a non-flush bio.
1595 ti = dm_table_find_target(ci->map, ci->sector); in __split_and_process_non_flush()
1597 return -EIO; in __split_and_process_non_flush()
1602 len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count); in __split_and_process_non_flush()
1604 r = __clone_and_map_data_bio(ci, ti, ci->sector, &len); in __split_and_process_non_flush()
1608 ci->sector += len; in __split_and_process_non_flush()
1609 ci->sector_count -= len; in __split_and_process_non_flush()
1617 ci->map = map; in init_clone_info()
1618 ci->io = alloc_io(md, bio); in init_clone_info()
1619 ci->sector = bio->bi_iter.bi_sector; in init_clone_info()
1623 (part_stat_get(part, field) -= (subnd))
1626 * Entry point to split a bio into clones and submit them to the targets.
1637 if (bio->bi_opf & REQ_PREFLUSH) { in __split_and_process_bio()
1649 if (current->bio_list && ci.sector_count && !error) { in __split_and_process_bio()
1651 * Remainder must be passed to submit_bio_noacct() in __split_and_process_bio()
1654 * We take a clone of the original to store in in __split_and_process_bio()
1655 * ci.io->orig_bio to be used by end_io_acct() and in __split_and_process_bio()
1656 * for dec_pending to use for completion handling. in __split_and_process_bio()
1658 struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count, in __split_and_process_bio()
1659 GFP_NOIO, &md->queue->bio_split); in __split_and_process_bio()
1660 ci.io->orig_bio = b; in __split_and_process_bio()
1665 * NOTE: this is a stop-gap fix, a proper fix involves in __split_and_process_bio()
1670 __dm_part_stat_sub(&dm_disk(md)->part0, in __split_and_process_bio()
1675 trace_block_split(md->queue, b, bio->bi_iter.bi_sector); in __split_and_process_bio()
1689 struct mapped_device *md = bio->bi_disk->private_data; in dm_submit_bio()
1703 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { in dm_submit_bio()
1704 if (bio->bi_opf & REQ_NOWAIT) in dm_submit_bio()
1706 else if (bio->bi_opf & REQ_RAHEAD) in dm_submit_bio()
1726 /*-----------------------------------------------------------------
1727 * An IDR is used to keep track of allocated minor numbers.
1728 *---------------------------------------------------------------*/
1744 return -EINVAL; in specific_minor()
1754 return r == -ENOSPC ? -EBUSY : r; in specific_minor()
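
Reserving one specific minor in the IDR mentioned above boils down to roughly the following; this is an illustrative rewrite, and the in-tree helper also serializes against a spinlock and uses idr_preload():

    #include <linux/gfp.h>
    #include <linux/idr.h>

    static DEFINE_IDR(example_minor_idr);

    /* Reserve exactly `minor`; -EBUSY if it is already taken. */
    static int example_reserve_minor(unsigned int minor)
    {
            int r = idr_alloc(&example_minor_idr, (void *)-1 /* cf. MINOR_ALLOCED */,
                              minor, minor + 1, GFP_KERNEL);

            if (r < 0)
                    return r == -ENOSPC ? -EBUSY : r;
            return 0;
    }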
1783 if (md->wq) in cleanup_mapped_device()
1784 destroy_workqueue(md->wq); in cleanup_mapped_device()
1785 bioset_exit(&md->bs); in cleanup_mapped_device()
1786 bioset_exit(&md->io_bs); in cleanup_mapped_device()
1788 if (md->dax_dev) { in cleanup_mapped_device()
1789 kill_dax(md->dax_dev); in cleanup_mapped_device()
1790 put_dax(md->dax_dev); in cleanup_mapped_device()
1791 md->dax_dev = NULL; in cleanup_mapped_device()
1794 if (md->disk) { in cleanup_mapped_device()
1796 md->disk->private_data = NULL; in cleanup_mapped_device()
1798 del_gendisk(md->disk); in cleanup_mapped_device()
1799 put_disk(md->disk); in cleanup_mapped_device()
1802 if (md->queue) in cleanup_mapped_device()
1803 blk_cleanup_queue(md->queue); in cleanup_mapped_device()
1805 cleanup_srcu_struct(&md->io_barrier); in cleanup_mapped_device()
1807 if (md->bdev) { in cleanup_mapped_device()
1808 bdput(md->bdev); in cleanup_mapped_device()
1809 md->bdev = NULL; in cleanup_mapped_device()
1812 mutex_destroy(&md->suspend_lock); in cleanup_mapped_device()
1813 mutex_destroy(&md->type_lock); in cleanup_mapped_device()
1814 mutex_destroy(&md->table_devices_lock); in cleanup_mapped_device()
1815 mutex_destroy(&md->swap_bios_lock); in cleanup_mapped_device()
1831 DMWARN("unable to allocate device, out of memory."); in alloc_dev()
1846 r = init_srcu_struct(&md->io_barrier); in alloc_dev()
1850 md->numa_node_id = numa_node_id; in alloc_dev()
1851 md->init_tio_pdu = false; in alloc_dev()
1852 md->type = DM_TYPE_NONE; in alloc_dev()
1853 mutex_init(&md->suspend_lock); in alloc_dev()
1854 mutex_init(&md->type_lock); in alloc_dev()
1855 mutex_init(&md->table_devices_lock); in alloc_dev()
1856 spin_lock_init(&md->deferred_lock); in alloc_dev()
1857 atomic_set(&md->holders, 1); in alloc_dev()
1858 atomic_set(&md->open_count, 0); in alloc_dev()
1859 atomic_set(&md->event_nr, 0); in alloc_dev()
1860 atomic_set(&md->uevent_seq, 0); in alloc_dev()
1861 INIT_LIST_HEAD(&md->uevent_list); in alloc_dev()
1862 INIT_LIST_HEAD(&md->table_devices); in alloc_dev()
1863 spin_lock_init(&md->uevent_lock); in alloc_dev()
1866 * default to bio-based until DM table is loaded and md->type in alloc_dev()
1867 * established. If request-based table is loaded: blk-mq will in alloc_dev()
1870 md->queue = blk_alloc_queue(numa_node_id); in alloc_dev()
1871 if (!md->queue) in alloc_dev()
1874 md->disk = alloc_disk_node(1, md->numa_node_id); in alloc_dev()
1875 if (!md->disk) in alloc_dev()
1878 init_waitqueue_head(&md->wait); in alloc_dev()
1879 INIT_WORK(&md->work, dm_wq_work); in alloc_dev()
1880 init_waitqueue_head(&md->eventq); in alloc_dev()
1881 init_completion(&md->kobj_holder.completion); in alloc_dev()
1883 md->swap_bios = get_swap_bios(); in alloc_dev()
1884 sema_init(&md->swap_bios_semaphore, md->swap_bios); in alloc_dev()
1885 mutex_init(&md->swap_bios_lock); in alloc_dev()
1887 md->disk->major = _major; in alloc_dev()
1888 md->disk->first_minor = minor; in alloc_dev()
1889 md->disk->fops = &dm_blk_dops; in alloc_dev()
1890 md->disk->queue = md->queue; in alloc_dev()
1891 md->disk->private_data = md; in alloc_dev()
1892 sprintf(md->disk->disk_name, "dm-%d", minor); in alloc_dev()
1895 md->dax_dev = alloc_dax(md, md->disk->disk_name, in alloc_dev()
1897 if (IS_ERR(md->dax_dev)) { in alloc_dev()
1898 md->dax_dev = NULL; in alloc_dev()
1903 add_disk_no_queue_reg(md->disk); in alloc_dev()
1904 format_dev_t(md->name, MKDEV(_major, minor)); in alloc_dev()
1906 md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0); in alloc_dev()
1907 if (!md->wq) in alloc_dev()
1910 md->bdev = bdget_disk(md->disk, 0); in alloc_dev()
1911 if (!md->bdev) in alloc_dev()
1914 dm_stats_init(&md->stats); in alloc_dev()
1940 int minor = MINOR(disk_devt(md->disk)); in free_dev()
1946 free_table_devices(&md->table_devices); in free_dev()
1947 dm_stats_cleanup(&md->stats); in free_dev()
1965 bioset_exit(&md->bs); in __bind_mempools()
1966 bioset_exit(&md->io_bs); in __bind_mempools()
1968 } else if (bioset_initialized(&md->bs)) { in __bind_mempools()
1970 * There's no need to reload with request-based dm in __bind_mempools()
1972 * Note for future: If you are to reload bioset, in __bind_mempools()
1973 * prep-ed requests in the queue may refer in __bind_mempools()
1974 * to bio from the old bioset, so you must walk in __bind_mempools()
1975 * through the queue to unprep. in __bind_mempools()
1981 bioset_initialized(&md->bs) || in __bind_mempools()
1982 bioset_initialized(&md->io_bs)); in __bind_mempools()
1984 ret = bioset_init_from_src(&md->bs, &p->bs); in __bind_mempools()
1987 ret = bioset_init_from_src(&md->io_bs, &p->io_bs); in __bind_mempools()
1989 bioset_exit(&md->bs); in __bind_mempools()
1997 * Bind a table to the device.
2005 spin_lock_irqsave(&md->uevent_lock, flags); in event_callback()
2006 list_splice_init(&md->uevent_list, &uevents); in event_callback()
2007 spin_unlock_irqrestore(&md->uevent_lock, flags); in event_callback()
2009 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); in event_callback()
2011 atomic_inc(&md->event_nr); in event_callback()
2012 wake_up(&md->eventq); in event_callback()
2023 struct request_queue *q = md->queue; in __bind()
2028 lockdep_assert_held(&md->suspend_lock); in __bind()
2036 memset(&md->geometry, 0, sizeof(md->geometry)); in __bind()
2038 set_capacity(md->disk, size); in __bind()
2039 bd_set_nr_sectors(md->bdev, size); in __bind()
2045 * Leverage the fact that request-based DM targets are in __bind()
2046 * immutable singletons - used to optimize dm_mq_queue_rq. in __bind()
2048 md->immutable_target = dm_table_get_immutable_target(t); in __bind()
2057 old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); in __bind()
2058 rcu_assign_pointer(md->map, (void *)t); in __bind()
2059 md->immutable_target_type = dm_table_get_immutable_target_type(t); in __bind()
2070 * Returns unbound table for the caller to free.
2074 struct dm_table *map = rcu_dereference_protected(md->map, 1); in __unbind()
2080 RCU_INIT_POINTER(md->map, NULL); in __unbind()
2096 return -ENXIO; in dm_create()
2109 * Functions to manage md->type.
2110 * All are required to hold md->type_lock.
2114 mutex_lock(&md->type_lock); in dm_lock_md_type()
2119 mutex_unlock(&md->type_lock); in dm_unlock_md_type()
2124 BUG_ON(!mutex_is_locked(&md->type_lock)); in dm_set_md_type()
2125 md->type = type; in dm_set_md_type()
2130 return md->type; in dm_get_md_type()
2135 return md->immutable_target_type; in dm_get_immutable_target_type()
2144 BUG_ON(!atomic_read(&md->holders)); in dm_get_queue_limits()
2145 return &md->queue->limits; in dm_get_queue_limits()
2160 md->disk->fops = &dm_rq_blk_dops; in dm_setup_md_queue()
2163 DMERR("Cannot initialize queue for request-based dm mapped device"); in dm_setup_md_queue()
2180 dm_table_set_restrictions(t, md->queue, &limits); in dm_setup_md_queue()
2181 blk_register_queue(md->disk); in dm_setup_md_queue()
2198 test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { in dm_get_md()
2212 return md->interface_ptr; in dm_get_mdptr()
2217 md->interface_ptr = ptr; in dm_set_mdptr()
2222 atomic_inc(&md->holders); in dm_get()
2223 BUG_ON(test_bit(DMF_FREEING, &md->flags)); in dm_get()
2229 if (test_bit(DMF_FREEING, &md->flags)) { in dm_hold()
2231 return -EBUSY; in dm_hold()
2241 return md->name; in dm_device_name()
2254 set_bit(DMF_FREEING, &md->flags); in __dm_destroy()
2257 blk_set_queue_dying(md->queue); in __dm_destroy()
2261 * do not race with internal suspend. in __dm_destroy()
2263 mutex_lock(&md->suspend_lock); in __dm_destroy()
2267 set_bit(DMF_SUSPENDED, &md->flags); in __dm_destroy()
2268 set_bit(DMF_POST_SUSPENDING, &md->flags); in __dm_destroy()
2273 mutex_unlock(&md->suspend_lock); in __dm_destroy()
2276 * Rare, but there may be I/O requests still going to complete, in __dm_destroy()
2277 * for example. Wait for all references to disappear. in __dm_destroy()
2282 while (atomic_read(&md->holders)) in __dm_destroy()
2284 else if (atomic_read(&md->holders)) in __dm_destroy()
2286 dm_device_name(md), atomic_read(&md->holders)); in __dm_destroy()
2305 atomic_dec(&md->holders); in dm_put()
2312 struct hd_struct *part = &dm_disk(md)->part0; in md_in_flight_bios()
2329 prepare_to_wait(&md->wait, &wait, task_state); in dm_wait_for_bios_completion()
2335 r = -EINTR; in dm_wait_for_bios_completion()
2341 finish_wait(&md->wait, &wait); in dm_wait_for_bios_completion()
2350 if (!queue_is_mq(md->queue)) in dm_wait_for_completion()
2354 if (!blk_mq_queue_inflight(md->queue)) in dm_wait_for_completion()
2358 r = -EINTR; in dm_wait_for_completion()
2376 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { in dm_wq_work()
2377 spin_lock_irq(&md->deferred_lock); in dm_wq_work()
2378 bio = bio_list_pop(&md->deferred); in dm_wq_work()
2379 spin_unlock_irq(&md->deferred_lock); in dm_wq_work()
2390 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); in dm_queue_flush()
2392 queue_work(md->wq, &md->work); in dm_queue_flush()
2396 * Swap in a new table, returning the old one for the caller to destroy.
2400 struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL); in dm_swap_table()
2404 mutex_lock(&md->suspend_lock); in dm_swap_table()
2419 limits = md->queue->limits; in dm_swap_table()
2435 mutex_unlock(&md->suspend_lock); in dm_swap_table()
2440 * Functions to lock and unlock any filesystem running on the
2447 WARN_ON(md->frozen_sb); in lock_fs()
2449 md->frozen_sb = freeze_bdev(md->bdev); in lock_fs()
2450 if (IS_ERR(md->frozen_sb)) { in lock_fs()
2451 r = PTR_ERR(md->frozen_sb); in lock_fs()
2452 md->frozen_sb = NULL; in lock_fs()
2456 set_bit(DMF_FROZEN, &md->flags); in lock_fs()
2463 if (!test_bit(DMF_FROZEN, &md->flags)) in unlock_fs()
2466 thaw_bdev(md->bdev, md->frozen_sb); in unlock_fs()
2467 md->frozen_sb = NULL; in unlock_fs()
2468 clear_bit(DMF_FROZEN, &md->flags); in unlock_fs()
2477 * now. There is no request-processing activity. All new requests
2478 * are being added to md->deferred list.
2488 lockdep_assert_held(&md->suspend_lock); in __dm_suspend()
2495 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); in __dm_suspend()
2506 * Flush I/O to the device. in __dm_suspend()
2509 * (lock_fs() flushes I/Os and waits for them to complete.) in __dm_suspend()
2521 * to target drivers i.e. no one may be executing in __dm_suspend()
2524 * To get all processes out of __split_and_process_bio in dm_submit_bio, in __dm_suspend()
2525 * we take the write lock. To prevent any process from reentering in __dm_suspend()
2528 * flush_workqueue(md->wq). in __dm_suspend()
2530 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); in __dm_suspend()
2532 synchronize_srcu(&md->io_barrier); in __dm_suspend()
2535 * Stop md->queue before flushing md->wq in case request-based in __dm_suspend()
2536 * dm defers requests to md->wq from md->queue. in __dm_suspend()
2539 dm_stop_queue(md->queue); in __dm_suspend()
2541 flush_workqueue(md->wq); in __dm_suspend()
2545 * We call dm_wait_for_completion to wait for all existing requests in __dm_suspend()
2546 * to finish. in __dm_suspend()
2550 set_bit(dmf_suspended_flag, &md->flags); in __dm_suspend()
2553 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); in __dm_suspend()
2555 synchronize_srcu(&md->io_barrier); in __dm_suspend()
2562 dm_start_queue(md->queue); in __dm_suspend()
2573 * We need to be able to change a mapping table under a mounted
2574 * filesystem. For example we might want to move some data in
2576 * dm_bind_table, dm_suspend must be called to flush any in
2580 * Suspend mechanism in request-based dm.
2584 * 3. Wait for all in-flight I/Os to be completed or requeued.
2586 * To abort suspend, start the request_queue.
2594 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); in dm_suspend()
2597 r = -EINVAL; in dm_suspend()
2603 mutex_unlock(&md->suspend_lock); in dm_suspend()
2604 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); in dm_suspend()
2610 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); in dm_suspend()
2616 set_bit(DMF_POST_SUSPENDING, &md->flags); in dm_suspend()
2618 clear_bit(DMF_POST_SUSPENDING, &md->flags); in dm_suspend()
2621 mutex_unlock(&md->suspend_lock); in dm_suspend()
2638 * Request-based dm is queueing the deferred I/Os in its request_queue. in __dm_resume()
2641 dm_start_queue(md->queue); in __dm_resume()
2654 r = -EINVAL; in dm_resume()
2655 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); in dm_resume()
2662 mutex_unlock(&md->suspend_lock); in dm_resume()
2663 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); in dm_resume()
2669 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); in dm_resume()
2677 clear_bit(DMF_SUSPENDED, &md->flags); in dm_resume()
2679 mutex_unlock(&md->suspend_lock); in dm_resume()
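
Taken together, the suspend, table-swap, and resume entry points above are driven from the ioctl layer in roughly this order; a simplified sketch in which example_replace_table() is not a real function and error handling is trimmed:

    #include <linux/err.h>
    #include <linux/device-mapper.h>
    #include "dm.h"  /* DM_SUSPEND_LOCKFS_FLAG */

    static int example_replace_table(struct mapped_device *md, struct dm_table *new_map)
    {
            struct dm_table *old_map;
            int r;

            r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);  /* quiesce and flush I/O */
            if (r)
                    return r;

            old_map = dm_swap_table(md, new_map);        /* returns the unbound table */
            if (IS_ERR(old_map)) {
                    dm_resume(md);
                    return PTR_ERR(old_map);
            }

            r = dm_resume(md);                           /* restart deferred I/O on the new map */
            if (old_map)
                    dm_table_destroy(old_map);
            return r;
    }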
2685 * Internal suspend/resume works like userspace-driven suspend. It waits
2686 * until all bios finish and prevents issuing new bios to the target drivers.
2694 lockdep_assert_held(&md->suspend_lock); in __dm_internal_suspend()
2696 if (md->internal_suspend_count++) in __dm_internal_suspend()
2697 return; /* nested internal suspend */ in __dm_internal_suspend()
2700 set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); in __dm_internal_suspend()
2701 return; /* nest suspend */ in __dm_internal_suspend()
2704 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); in __dm_internal_suspend()
2707 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is in __dm_internal_suspend()
2708 * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend in __dm_internal_suspend()
2709 * would require changing .presuspend to return an error -- avoid this in __dm_internal_suspend()
2710 * until there is a need for more elaborate variants of internal suspend. in __dm_internal_suspend()
2715 set_bit(DMF_POST_SUSPENDING, &md->flags); in __dm_internal_suspend()
2717 clear_bit(DMF_POST_SUSPENDING, &md->flags); in __dm_internal_suspend()
2722 BUG_ON(!md->internal_suspend_count); in __dm_internal_resume()
2724 if (--md->internal_suspend_count) in __dm_internal_resume()
2725 return; /* resume from nested internal suspend */ in __dm_internal_resume()
2728 goto done; /* resume from nested suspend */ in __dm_internal_resume()
2731 * NOTE: existing callers don't need to call dm_table_resume_targets in __dm_internal_resume()
2732 * (which may fail -- so best to avoid it for now by passing NULL map) in __dm_internal_resume()
2737 clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); in __dm_internal_resume()
2739 wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY); in __dm_internal_resume()
2744 mutex_lock(&md->suspend_lock); in dm_internal_suspend_noflush()
2746 mutex_unlock(&md->suspend_lock); in dm_internal_suspend_noflush()
2752 mutex_lock(&md->suspend_lock); in dm_internal_resume()
2754 mutex_unlock(&md->suspend_lock); in dm_internal_resume()
2759 * Fast variants of internal suspend/resume hold md->suspend_lock,
2760 * which prevents interaction with userspace-driven suspend.
2765 mutex_lock(&md->suspend_lock); in dm_internal_suspend_fast()
2769 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); in dm_internal_suspend_fast()
2770 synchronize_srcu(&md->io_barrier); in dm_internal_suspend_fast()
2771 flush_workqueue(md->wq); in dm_internal_suspend_fast()
2784 mutex_unlock(&md->suspend_lock); in dm_internal_resume_fast()
2788 /*-----------------------------------------------------------------
2790 *---------------------------------------------------------------*/
2802 r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action); in dm_kobject_uevent()
2806 r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj, in dm_kobject_uevent()
2817 return atomic_add_return(1, &md->uevent_seq); in dm_next_uevent_seq()
2822 return atomic_read(&md->event_nr); in dm_get_event_nr()
2827 return wait_event_interruptible(md->eventq, in dm_wait_event()
2828 (event_nr != atomic_read(&md->event_nr))); in dm_wait_event()
2835 spin_lock_irqsave(&md->uevent_lock, flags); in dm_uevent_add()
2836 list_add(elist, &md->uevent_list); in dm_uevent_add()
2837 spin_unlock_irqrestore(&md->uevent_lock, flags); in dm_uevent_add()
2846 return md->disk; in dm_disk()
2852 return &md->kobj_holder.kobj; in dm_kobject()
2862 if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { in dm_get_from_kobject()
2875 return test_bit(DMF_SUSPENDED, &md->flags); in dm_suspended_md()
2880 return test_bit(DMF_POST_SUSPENDING, &md->flags); in dm_post_suspending_md()
2885 return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); in dm_suspended_internally_md()
2890 return test_bit(DMF_DEFERRED_REMOVE, &md->flags); in dm_test_deferred_remove_flag()
2895 return dm_suspended_md(ti->table->md); in dm_suspended()
2901 return dm_post_suspending_md(ti->table->md); in dm_post_suspending()
2907 return __noflush_suspending(ti->table->md); in dm_noflush_suspending()
2915 struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id); in dm_alloc_md_mempools()
2929 ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0); in dm_alloc_md_mempools()
2932 if (integrity && bioset_integrity_create(&pools->io_bs, pool_size)) in dm_alloc_md_mempools()
2938 /* per_io_data_size is used for blk-mq pdu at queue allocation */ in dm_alloc_md_mempools()
2944 ret = bioset_init(&pools->bs, pool_size, front_pad, 0); in dm_alloc_md_mempools()
2948 if (integrity && bioset_integrity_create(&pools->bs, pool_size)) in dm_alloc_md_mempools()
2964 bioset_exit(&pools->bs); in dm_free_md_mempools()
2965 bioset_exit(&pools->io_bs); in dm_free_md_mempools()
2980 struct mapped_device *md = bdev->bd_disk->private_data; in dm_call_pr()
2983 int ret = -ENOTTY, srcu_idx; in dm_call_pr()
2994 ret = -EINVAL; in dm_call_pr()
2995 if (!ti->type->iterate_devices) in dm_call_pr()
2998 ret = ti->type->iterate_devices(ti, fn, data); in dm_call_pr()
3005 * For register / unregister we need to manually call out to every path.
3011 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops; in __dm_pr_register()
3013 if (!ops || !ops->pr_register) in __dm_pr_register()
3014 return -EOPNOTSUPP; in __dm_pr_register()
3015 return ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags); in __dm_pr_register()
3031 /* unregister all paths if we failed to register any path */ in dm_pr_register()
3045 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_reserve()
3053 ops = bdev->bd_disk->fops->pr_ops; in dm_pr_reserve()
3054 if (ops && ops->pr_reserve) in dm_pr_reserve()
3055 r = ops->pr_reserve(bdev, key, type, flags); in dm_pr_reserve()
3057 r = -EOPNOTSUPP; in dm_pr_reserve()
3065 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_release()
3073 ops = bdev->bd_disk->fops->pr_ops; in dm_pr_release()
3074 if (ops && ops->pr_release) in dm_pr_release()
3075 r = ops->pr_release(bdev, key, type); in dm_pr_release()
3077 r = -EOPNOTSUPP; in dm_pr_release()
3086 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_preempt()
3094 ops = bdev->bd_disk->fops->pr_ops; in dm_pr_preempt()
3095 if (ops && ops->pr_preempt) in dm_pr_preempt()
3096 r = ops->pr_preempt(bdev, old_key, new_key, type, abort); in dm_pr_preempt()
3098 r = -EOPNOTSUPP; in dm_pr_preempt()
3106 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_clear()
3114 ops = bdev->bd_disk->fops->pr_ops; in dm_pr_clear()
3115 if (ops && ops->pr_clear) in dm_pr_clear()
3116 r = ops->pr_clear(bdev, key); in dm_pr_clear()
3118 r = -EOPNOTSUPP; in dm_pr_clear()
3170 MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
3179 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");