Lines matching references to 's' (identifier cross-reference output; the functions below belong to the device-mapper snapshot target, apparently drivers/md/dm-snap.c; the leading numbers are source line numbers):
176 struct dm_dev *dm_snap_origin(struct dm_snapshot *s) in dm_snap_origin() argument
178 return s->origin; in dm_snap_origin()
182 struct dm_dev *dm_snap_cow(struct dm_snapshot *s) in dm_snap_cow() argument
184 return s->cow; in dm_snap_cow()
261 static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk) in track_chunk() argument
267 spin_lock_irq(&s->tracked_chunk_lock); in track_chunk()
269 &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]); in track_chunk()
270 spin_unlock_irq(&s->tracked_chunk_lock); in track_chunk()
273 static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio) in stop_tracking_chunk() argument
278 spin_lock_irqsave(&s->tracked_chunk_lock, flags); in stop_tracking_chunk()
280 spin_unlock_irqrestore(&s->tracked_chunk_lock, flags); in stop_tracking_chunk()
283 static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk) in __chunk_is_tracked() argument
288 spin_lock_irq(&s->tracked_chunk_lock); in __chunk_is_tracked()
291 &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) { in __chunk_is_tracked()
298 spin_unlock_irq(&s->tracked_chunk_lock); in __chunk_is_tracked()
307 static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk) in __check_for_conflicting_io() argument
309 while (__chunk_is_tracked(s, chunk)) in __check_for_conflicting_io()
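The four helpers above (lines 261-309) are dm-snap's in-flight I/O tracking: every bio remapped to a chunk is entered into a small spinlock-protected hash table, and __check_for_conflicting_io() polls until no I/O remains in flight against a chunk that is about to be relocated. A condensed reconstruction of the pattern, assuming the dm_snap_tracked_chunk per-bio structure this target stores via dm_per_bio_data():

struct dm_snap_tracked_chunk {
	struct hlist_node node;
	chunk_t chunk;
};

static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c =
		dm_per_bio_data(bio, sizeof(struct dm_snap_tracked_chunk));

	c->chunk = chunk;
	spin_lock_irq(&s->tracked_chunk_lock);
	hlist_add_head(&c->node,
		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
	spin_unlock_irq(&s->tracked_chunk_lock);
}

static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c;
	int found = 0;

	spin_lock_irq(&s->tracked_chunk_lock);
	hlist_for_each_entry(c,
	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
		if (c->chunk == chunk) {
			found = 1;
			break;
		}
	}
	spin_unlock_irq(&s->tracked_chunk_lock);

	return found;
}

/* Poll until I/O in flight against 'chunk' has drained. */
static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
{
	while (__chunk_is_tracked(s, chunk))
		msleep(1);
}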
453 struct dm_snapshot *s; in __find_snapshots_sharing_cow() local
462 list_for_each_entry(s, &o->snapshots, list) { in __find_snapshots_sharing_cow()
463 if (dm_target_is_snapshot_merge(s->ti) && snap_merge) in __find_snapshots_sharing_cow()
464 *snap_merge = s; in __find_snapshots_sharing_cow()
465 if (!bdev_equal(s->cow->bdev, snap->cow->bdev)) in __find_snapshots_sharing_cow()
468 down_read(&s->lock); in __find_snapshots_sharing_cow()
469 active = s->active; in __find_snapshots_sharing_cow()
470 up_read(&s->lock); in __find_snapshots_sharing_cow()
474 *snap_src = s; in __find_snapshots_sharing_cow()
476 *snap_dest = s; in __find_snapshots_sharing_cow()
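Lines 453-476 pair up snapshots sharing one COW device during a handover: the active snapshot becomes the source, an inactive one the destination, and any merging target is reported separately. A reconstruction of the walk (the caller is assumed to hold _origins_lock so the origin's snapshot list is stable):

static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
					struct dm_snapshot **snap_src,
					struct dm_snapshot **snap_dest,
					struct dm_snapshot **snap_merge)
{
	struct dm_snapshot *s;
	struct origin *o;
	int count = 0;
	int active;

	o = __lookup_origin(snap->origin->bdev);
	if (!o)
		return 0;

	list_for_each_entry(s, &o->snapshots, list) {
		if (dm_target_is_snapshot_merge(s->ti) && snap_merge)
			*snap_merge = s;
		if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
			continue;

		down_read(&s->lock);
		active = s->active;
		up_read(&s->lock);

		if (active) {
			if (snap_src)
				*snap_src = s;
		} else if (snap_dest)
			*snap_dest = s;

		count++;
	}

	return count;
}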
534 static void __insert_snapshot(struct origin *o, struct dm_snapshot *s) in __insert_snapshot() argument
540 if (l->store->chunk_size < s->store->chunk_size) in __insert_snapshot()
542 list_add_tail(&s->list, &l->list); in __insert_snapshot()
596 static void reregister_snapshot(struct dm_snapshot *s) in reregister_snapshot() argument
598 struct block_device *bdev = s->origin->bdev; in reregister_snapshot()
602 list_del(&s->list); in reregister_snapshot()
603 __insert_snapshot(__lookup_origin(bdev), s); in reregister_snapshot()
608 static void unregister_snapshot(struct dm_snapshot *s) in unregister_snapshot() argument
613 o = __lookup_origin(s->origin->bdev); in unregister_snapshot()
615 list_del(&s->list); in unregister_snapshot()
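Lines 534-615 maintain each origin's list of snapshots. __insert_snapshot() keeps that list sorted by chunk size, largest first, and reregister_snapshot() re-sorts an entry by deleting and re-inserting it. A sketch of the ordered insert:

static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
{
	struct dm_snapshot *l;

	/* Keep the list sorted: largest chunk size first. Falling off
	 * the end leaves 'l' positioned so list_add_tail() appends. */
	list_for_each_entry(l, &o->snapshots, list)
		if (l->store->chunk_size < s->store->chunk_size)
			break;
	list_add_tail(&s->list, &l->list);
}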
637 static void dm_exception_table_lock_init(struct dm_snapshot *s, chunk_t chunk, in dm_exception_table_lock_init() argument
640 struct dm_exception_table *complete = &s->complete; in dm_exception_table_lock_init()
641 struct dm_exception_table *pending = &s->pending; in dm_exception_table_lock_init()
741 static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s) in alloc_pending_exception() argument
743 struct dm_snap_pending_exception *pe = mempool_alloc(&s->pending_pool, in alloc_pending_exception()
746 atomic_inc(&s->pending_exceptions_count); in alloc_pending_exception()
747 pe->snap = s; in alloc_pending_exception()
754 struct dm_snapshot *s = pe->snap; in free_pending_exception() local
756 mempool_free(pe, &s->pending_pool); in free_pending_exception()
758 atomic_dec(&s->pending_exceptions_count); in free_pending_exception()
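alloc_pending_exception() and free_pending_exception() (lines 741-758) draw pending exceptions from a slab-backed mempool and keep an outstanding count so snapshot_dtr() can wait for them to drain (see line 1511). Reconstructed pairing, with the barrier the real code uses to order the free against the decrement:

static struct dm_snap_pending_exception *
alloc_pending_exception(struct dm_snapshot *s)
{
	struct dm_snap_pending_exception *pe =
		mempool_alloc(&s->pending_pool, GFP_NOIO);

	atomic_inc(&s->pending_exceptions_count);
	pe->snap = s;

	return pe;
}

static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	mempool_free(pe, &s->pending_pool);
	smp_mb__before_atomic();
	atomic_dec(&s->pending_exceptions_count);
}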
823 struct dm_snapshot *s = context; in dm_add_exception() local
841 dm_exception_table_lock_init(s, old, &lock); in dm_add_exception()
844 dm_insert_exception(&s->complete, e); in dm_add_exception()
882 static int init_hash_tables(struct dm_snapshot *s) in init_hash_tables() argument
890 cow_dev_size = get_dev_size(s->cow->bdev); in init_hash_tables()
893 hash_size = cow_dev_size >> s->store->chunk_shift; in init_hash_tables()
899 if (dm_exception_table_init(&s->complete, hash_size, in init_hash_tables()
911 if (dm_exception_table_init(&s->pending, hash_size, 0)) { in init_hash_tables()
912 dm_exception_table_exit(&s->complete, exception_cache); in init_hash_tables()
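init_hash_tables() (lines 882-912) sizes the completed-exception table from the COW device itself: one bucket per chunk of COW space, clamped and rounded down to a power of two, with a much smaller pending table because allocation rather than lookup dominates there. A reconstruction; calc_max_buckets(), the minimum of 64 buckets, and DM_CHUNK_CONSECUTIVE_BITS are assumptions taken from the same file:

static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size;

	cow_dev_size = get_dev_size(s->cow->bdev);
	hash_size = cow_dev_size >> s->store->chunk_shift;
	hash_size = min(hash_size, calc_max_buckets());
	if (hash_size < 64)
		hash_size = 64;
	hash_size = rounddown_pow_of_two(hash_size);
	if (dm_exception_table_init(&s->complete, hash_size,
				    DM_CHUNK_CONSECUTIVE_BITS))
		return -ENOMEM;

	/* The in-flight table is kept much smaller than the real one:
	 * pending exceptions are few and short-lived. */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (dm_exception_table_init(&s->pending, hash_size, 0)) {
		dm_exception_table_exit(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}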
919 static void merge_shutdown(struct dm_snapshot *s) in merge_shutdown() argument
921 clear_bit_unlock(RUNNING_MERGE, &s->state_bits); in merge_shutdown()
923 wake_up_bit(&s->state_bits, RUNNING_MERGE); in merge_shutdown()
926 static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s) in __release_queued_bios_after_merge() argument
928 s->first_merging_chunk = 0; in __release_queued_bios_after_merge()
929 s->num_merging_chunks = 0; in __release_queued_bios_after_merge()
931 return bio_list_get(&s->bios_queued_during_merge); in __release_queued_bios_after_merge()
937 static int __remove_single_exception_chunk(struct dm_snapshot *s, in __remove_single_exception_chunk() argument
942 e = dm_lookup_exception(&s->complete, old_chunk); in __remove_single_exception_chunk()
988 static int remove_single_exception_chunk(struct dm_snapshot *s) in remove_single_exception_chunk() argument
992 chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1; in remove_single_exception_chunk()
994 down_write(&s->lock); in remove_single_exception_chunk()
1001 r = __remove_single_exception_chunk(s, old_chunk); in remove_single_exception_chunk()
1004 } while (old_chunk-- > s->first_merging_chunk); in remove_single_exception_chunk()
1006 b = __release_queued_bios_after_merge(s); in remove_single_exception_chunk()
1009 up_write(&s->lock); in remove_single_exception_chunk()
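remove_single_exception_chunk() (lines 988-1009) retires a just-merged range one exception at a time, walking backwards from the end of the range so the consecutive-chunk accounting in the exception store decrements correctly, then releases any bios queued against that range while the merge was in flight. Reconstruction (flush_bios() is another helper in this file):

static int remove_single_exception_chunk(struct dm_snapshot *s)
{
	struct bio *b = NULL;
	int r;
	chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1;

	down_write(&s->lock);

	/* Walk the merged range in reverse order. */
	do {
		r = __remove_single_exception_chunk(s, old_chunk);
		if (r)
			goto out;
	} while (old_chunk-- > s->first_merging_chunk);

	b = __release_queued_bios_after_merge(s);

out:
	up_write(&s->lock);
	if (b)
		flush_bios(b);

	return r;
}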
1042 static void snapshot_merge_next_chunks(struct dm_snapshot *s) in snapshot_merge_next_chunks() argument
1050 BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits)); in snapshot_merge_next_chunks()
1051 if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits))) in snapshot_merge_next_chunks()
1057 if (!s->valid) { in snapshot_merge_next_chunks()
1062 linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk, in snapshot_merge_next_chunks()
1068 down_write(&s->lock); in snapshot_merge_next_chunks()
1069 s->merge_failed = true; in snapshot_merge_next_chunks()
1070 up_write(&s->lock); in snapshot_merge_next_chunks()
1083 io_size = linear_chunks * s->store->chunk_size; in snapshot_merge_next_chunks()
1085 dest.bdev = s->origin->bdev; in snapshot_merge_next_chunks()
1086 dest.sector = chunk_to_sector(s->store, old_chunk); in snapshot_merge_next_chunks()
1089 src.bdev = s->cow->bdev; in snapshot_merge_next_chunks()
1090 src.sector = chunk_to_sector(s->store, new_chunk); in snapshot_merge_next_chunks()
1103 while (origin_write_extent(s, dest.sector, io_size)) { in snapshot_merge_next_chunks()
1111 down_write(&s->lock); in snapshot_merge_next_chunks()
1112 s->first_merging_chunk = old_chunk; in snapshot_merge_next_chunks()
1113 s->num_merging_chunks = linear_chunks; in snapshot_merge_next_chunks()
1114 up_write(&s->lock); in snapshot_merge_next_chunks()
1118 __check_for_conflicting_io(s, old_chunk + i); in snapshot_merge_next_chunks()
1120 dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s); in snapshot_merge_next_chunks()
1124 merge_shutdown(s); in snapshot_merge_next_chunks()
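The heart of snapshot_merge_next_chunks() (lines 1042-1124) is a kcopyd copy of the next run of linear chunks from the COW device back to the origin. The sketch below condenses that step into a helper; merge_copy_back() is a hypothetical name (the real code is inline) and the error paths are omitted:

static void merge_copy_back(struct dm_snapshot *s, chunk_t old_chunk,
			    chunk_t new_chunk, int linear_chunks)
{
	struct dm_io_region src, dest;
	sector_t io_size = linear_chunks * s->store->chunk_size;
	int i;

	dest.bdev = s->origin->bdev;		/* origin area being restored */
	dest.sector = chunk_to_sector(s->store, old_chunk);
	dest.count = io_size;

	src.bdev = s->cow->bdev;		/* saved copy in the COW store */
	src.sector = chunk_to_sector(s->store, new_chunk);
	src.count = io_size;

	/* Publish the merging window so snapshot_merge_map() queues
	 * bios that would otherwise race with the copy (line 2174). */
	down_write(&s->lock);
	s->first_merging_chunk = old_chunk;
	s->num_merging_chunks = linear_chunks;
	up_write(&s->lock);

	/* Drain I/O still in flight against the chunks being merged. */
	for (i = 0; i < linear_chunks; i++)
		__check_for_conflicting_io(s, old_chunk + i);

	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
}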
1129 static int flush_data(struct dm_snapshot *s) in flush_data() argument
1131 struct bio *flush_bio = &s->flush_bio; in flush_data()
1134 bio_set_dev(flush_bio, s->origin->bdev); in flush_data()
1142 struct dm_snapshot *s = context; in merge_callback() local
1153 if (flush_data(s) < 0) { in merge_callback()
1158 if (s->store->type->commit_merge(s->store, in merge_callback()
1159 s->num_merging_chunks) < 0) { in merge_callback()
1164 if (remove_single_exception_chunk(s) < 0) in merge_callback()
1167 snapshot_merge_next_chunks(s); in merge_callback()
1172 down_write(&s->lock); in merge_callback()
1173 s->merge_failed = true; in merge_callback()
1174 b = __release_queued_bios_after_merge(s); in merge_callback()
1175 up_write(&s->lock); in merge_callback()
1178 merge_shutdown(s); in merge_callback()
1181 static void start_merge(struct dm_snapshot *s) in start_merge() argument
1183 if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits)) in start_merge()
1184 snapshot_merge_next_chunks(s); in start_merge()
1190 static void stop_merge(struct dm_snapshot *s) in stop_merge() argument
1192 set_bit(SHUTDOWN_MERGE, &s->state_bits); in stop_merge()
1193 wait_on_bit(&s->state_bits, RUNNING_MERGE, TASK_UNINTERRUPTIBLE); in stop_merge()
1194 clear_bit(SHUTDOWN_MERGE, &s->state_bits); in stop_merge()
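start_merge(), stop_merge() and merge_shutdown() (lines 919-923, 1181-1194) form a small handshake on s->state_bits: the merge state machine is only kicked if RUNNING_MERGE was clear, and shutdown sets SHUTDOWN_MERGE, then sleeps until the worker notices it and drops RUNNING_MERGE with release semantics. Reconstructed together:

static void merge_shutdown(struct dm_snapshot *s)
{
	clear_bit_unlock(RUNNING_MERGE, &s->state_bits);
	smp_mb__after_atomic();
	wake_up_bit(&s->state_bits, RUNNING_MERGE);
}

static void start_merge(struct dm_snapshot *s)
{
	if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits))
		snapshot_merge_next_chunks(s);
}

/* Stop the merging process and wait until it finishes. */
static void stop_merge(struct dm_snapshot *s)
{
	set_bit(SHUTDOWN_MERGE, &s->state_bits);
	wait_on_bit(&s->state_bits, RUNNING_MERGE, TASK_UNINTERRUPTIBLE);
	clear_bit(SHUTDOWN_MERGE, &s->state_bits);
}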
1197 static int parse_snapshot_features(struct dm_arg_set *as, struct dm_snapshot *s, in parse_snapshot_features() argument
1223 s->discard_zeroes_cow = true; in parse_snapshot_features()
1226 s->discard_passdown_origin = true; in parse_snapshot_features()
1235 if (!s->discard_zeroes_cow && s->discard_passdown_origin) { in parse_snapshot_features()
1253 struct dm_snapshot *s; in snapshot_ctr() local
1273 s = kzalloc(sizeof(*s), GFP_KERNEL); in snapshot_ctr()
1274 if (!s) { in snapshot_ctr()
1283 r = parse_snapshot_features(&as, s, ti); in snapshot_ctr()
1291 r = dm_get_device(ti, origin_path, origin_mode, &s->origin); in snapshot_ctr()
1296 origin_dev = s->origin->bdev->bd_dev; in snapshot_ctr()
1309 r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow); in snapshot_ctr()
1315 r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store); in snapshot_ctr()
1325 s->ti = ti; in snapshot_ctr()
1326 s->valid = 1; in snapshot_ctr()
1327 s->snapshot_overflowed = 0; in snapshot_ctr()
1328 s->active = 0; in snapshot_ctr()
1329 atomic_set(&s->pending_exceptions_count, 0); in snapshot_ctr()
1330 spin_lock_init(&s->pe_allocation_lock); in snapshot_ctr()
1331 s->exception_start_sequence = 0; in snapshot_ctr()
1332 s->exception_complete_sequence = 0; in snapshot_ctr()
1333 s->out_of_order_tree = RB_ROOT; in snapshot_ctr()
1334 init_rwsem(&s->lock); in snapshot_ctr()
1335 INIT_LIST_HEAD(&s->list); in snapshot_ctr()
1336 spin_lock_init(&s->pe_lock); in snapshot_ctr()
1337 s->state_bits = 0; in snapshot_ctr()
1338 s->merge_failed = false; in snapshot_ctr()
1339 s->first_merging_chunk = 0; in snapshot_ctr()
1340 s->num_merging_chunks = 0; in snapshot_ctr()
1341 bio_list_init(&s->bios_queued_during_merge); in snapshot_ctr()
1342 bio_init(&s->flush_bio, NULL, 0); in snapshot_ctr()
1345 if (init_hash_tables(s)) { in snapshot_ctr()
1351 init_waitqueue_head(&s->in_progress_wait); in snapshot_ctr()
1353 s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle); in snapshot_ctr()
1354 if (IS_ERR(s->kcopyd_client)) { in snapshot_ctr()
1355 r = PTR_ERR(s->kcopyd_client); in snapshot_ctr()
1360 r = mempool_init_slab_pool(&s->pending_pool, MIN_IOS, pending_cache); in snapshot_ctr()
1367 INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]); in snapshot_ctr()
1369 spin_lock_init(&s->tracked_chunk_lock); in snapshot_ctr()
1371 ti->private = s; in snapshot_ctr()
1373 if (s->discard_zeroes_cow) in snapshot_ctr()
1374 ti->num_discard_bios = (s->discard_passdown_origin ? 2 : 1); in snapshot_ctr()
1379 r = register_snapshot(s); in snapshot_ctr()
1395 s->store->chunk_size = 0; in snapshot_ctr()
1399 r = s->store->type->read_metadata(s->store, dm_add_exception, in snapshot_ctr()
1400 (void *)s); in snapshot_ctr()
1405 s->valid = 0; in snapshot_ctr()
1409 if (!s->store->chunk_size) { in snapshot_ctr()
1415 r = dm_set_target_max_io_len(ti, s->store->chunk_size); in snapshot_ctr()
1422 unregister_snapshot(s); in snapshot_ctr()
1424 mempool_exit(&s->pending_pool); in snapshot_ctr()
1426 dm_kcopyd_client_destroy(s->kcopyd_client); in snapshot_ctr()
1428 dm_exception_table_exit(&s->pending, pending_cache); in snapshot_ctr()
1429 dm_exception_table_exit(&s->complete, exception_cache); in snapshot_ctr()
1431 dm_exception_store_destroy(s->store); in snapshot_ctr()
1433 dm_put_device(ti, s->cow); in snapshot_ctr()
1435 dm_put_device(ti, s->origin); in snapshot_ctr()
1438 kfree(s); in snapshot_ctr()
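snapshot_ctr() (lines 1253-1438) builds a target from a table line of the form <origin> <COW device> <persistent?> <chunksize>, e.g. "0 2097152 snapshot /dev/vg/base /dev/vg/cow P 8" passed to dmsetup, where P selects a persistent exception store (device names and sizes here are illustrative). Its failure handling is the classic goto ladder visible in lines 1422-1438; each label undoes exactly the initialisation that succeeded before it (label names reconstructed):

bad_read_metadata:
	unregister_snapshot(s);
bad_load_and_register:
	mempool_exit(&s->pending_pool);
bad_pending_pool:
	dm_kcopyd_client_destroy(s->kcopyd_client);
bad_kcopyd:
	dm_exception_table_exit(&s->pending, pending_cache);
	dm_exception_table_exit(&s->complete, exception_cache);
bad_hash_tables:
	dm_exception_store_destroy(s->store);
bad_store:
	dm_put_device(ti, s->cow);
bad_cow:
	dm_put_device(ti, s->origin);
bad_origin:
	kfree(s);
bad:
	return r;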
1443 static void __free_exceptions(struct dm_snapshot *s) in __free_exceptions() argument
1445 dm_kcopyd_client_destroy(s->kcopyd_client); in __free_exceptions()
1446 s->kcopyd_client = NULL; in __free_exceptions()
1448 dm_exception_table_exit(&s->pending, pending_cache); in __free_exceptions()
1449 dm_exception_table_exit(&s->complete, exception_cache); in __free_exceptions()
1490 struct dm_snapshot *s = ti->private; in snapshot_dtr() local
1495 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); in snapshot_dtr()
1496 if (snap_src && snap_dest && (s == snap_src)) { in snapshot_dtr()
1505 stop_merge(s); in snapshot_dtr()
1509 unregister_snapshot(s); in snapshot_dtr()
1511 while (atomic_read(&s->pending_exceptions_count)) in snapshot_dtr()
1521 BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i])); in snapshot_dtr()
1524 __free_exceptions(s); in snapshot_dtr()
1526 mempool_exit(&s->pending_pool); in snapshot_dtr()
1528 dm_exception_store_destroy(s->store); in snapshot_dtr()
1530 bio_uninit(&s->flush_bio); in snapshot_dtr()
1532 dm_put_device(ti, s->cow); in snapshot_dtr()
1534 dm_put_device(ti, s->origin); in snapshot_dtr()
1536 WARN_ON(s->in_progress); in snapshot_dtr()
1538 kfree(s); in snapshot_dtr()
1541 static void account_start_copy(struct dm_snapshot *s) in account_start_copy() argument
1543 spin_lock(&s->in_progress_wait.lock); in account_start_copy()
1544 s->in_progress++; in account_start_copy()
1545 spin_unlock(&s->in_progress_wait.lock); in account_start_copy()
1548 static void account_end_copy(struct dm_snapshot *s) in account_end_copy() argument
1550 spin_lock(&s->in_progress_wait.lock); in account_end_copy()
1551 BUG_ON(!s->in_progress); in account_end_copy()
1552 s->in_progress--; in account_end_copy()
1553 if (likely(s->in_progress <= cow_threshold) && in account_end_copy()
1554 unlikely(waitqueue_active(&s->in_progress_wait))) in account_end_copy()
1555 wake_up_locked(&s->in_progress_wait); in account_end_copy()
1556 spin_unlock(&s->in_progress_wait.lock); in account_end_copy()
1559 static bool wait_for_in_progress(struct dm_snapshot *s, bool unlock_origins) in wait_for_in_progress() argument
1561 if (unlikely(s->in_progress > cow_threshold)) { in wait_for_in_progress()
1562 spin_lock(&s->in_progress_wait.lock); in wait_for_in_progress()
1563 if (likely(s->in_progress > cow_threshold)) { in wait_for_in_progress()
1572 __add_wait_queue(&s->in_progress_wait, &wait); in wait_for_in_progress()
1574 spin_unlock(&s->in_progress_wait.lock); in wait_for_in_progress()
1578 remove_wait_queue(&s->in_progress_wait, &wait); in wait_for_in_progress()
1581 spin_unlock(&s->in_progress_wait.lock); in wait_for_in_progress()
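account_start_copy(), account_end_copy() and wait_for_in_progress() (lines 1541-1581) throttle the number of in-flight COW copies against the module's cow_threshold, reusing the waitqueue's internal spinlock to guard the counter. Condensed reconstruction; when unlock_origins is set, the caller's _origins_lock is dropped before sleeping so one congested snapshot does not stall writes to other origins:

static void account_start_copy(struct dm_snapshot *s)
{
	spin_lock(&s->in_progress_wait.lock);
	s->in_progress++;
	spin_unlock(&s->in_progress_wait.lock);
}

static void account_end_copy(struct dm_snapshot *s)
{
	spin_lock(&s->in_progress_wait.lock);
	BUG_ON(!s->in_progress);
	s->in_progress--;
	if (likely(s->in_progress <= cow_threshold) &&
	    unlikely(waitqueue_active(&s->in_progress_wait)))
		wake_up_locked(&s->in_progress_wait);
	spin_unlock(&s->in_progress_wait.lock);
}

static bool wait_for_in_progress(struct dm_snapshot *s, bool unlock_origins)
{
	if (unlikely(s->in_progress > cow_threshold)) {
		spin_lock(&s->in_progress_wait.lock);
		if (likely(s->in_progress > cow_threshold)) {
			/* Open-coded wait: the waitqueue lock already
			 * serialises us against the wakeup side. */
			DECLARE_WAITQUEUE(wait, current);

			__add_wait_queue(&s->in_progress_wait, &wait);
			__set_current_state(TASK_UNINTERRUPTIBLE);
			spin_unlock(&s->in_progress_wait.lock);
			if (unlock_origins)
				up_read(&_origins_lock);
			io_schedule();
			remove_wait_queue(&s->in_progress_wait, &wait);
			return false;	/* caller must retry from the top */
		}
		spin_unlock(&s->in_progress_wait.lock);
	}
	return true;
}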
1606 static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio) in retry_origin_bios() argument
1614 r = do_origin(s->origin, bio, false); in retry_origin_bios()
1636 static void __invalidate_snapshot(struct dm_snapshot *s, int err) in __invalidate_snapshot() argument
1638 if (!s->valid) in __invalidate_snapshot()
1646 if (s->store->type->drop_snapshot) in __invalidate_snapshot()
1647 s->store->type->drop_snapshot(s->store); in __invalidate_snapshot()
1649 s->valid = 0; in __invalidate_snapshot()
1651 dm_table_event(s->ti->table); in __invalidate_snapshot()
1654 static void invalidate_snapshot(struct dm_snapshot *s, int err) in invalidate_snapshot() argument
1656 down_write(&s->lock); in invalidate_snapshot()
1657 __invalidate_snapshot(s, err); in invalidate_snapshot()
1658 up_write(&s->lock); in invalidate_snapshot()
1665 struct dm_snapshot *s = pe->snap; in pending_complete() local
1672 dm_exception_table_lock_init(s, pe->e.old_chunk, &lock); in pending_complete()
1676 invalidate_snapshot(s, -EIO); in pending_complete()
1685 invalidate_snapshot(s, -ENOMEM); in pending_complete()
1693 down_read(&s->lock); in pending_complete()
1695 if (!s->valid) { in pending_complete()
1696 up_read(&s->lock); in pending_complete()
1710 dm_insert_exception(&s->complete, e); in pending_complete()
1711 up_read(&s->lock); in pending_complete()
1714 if (__chunk_is_tracked(s, pe->e.old_chunk)) { in pending_complete()
1716 __check_for_conflicting_io(s, pe->e.old_chunk); in pending_complete()
1744 retry_origin_bios(s, origin_bios); in pending_complete()
1751 struct dm_snapshot *s = pe->snap; in complete_exception() local
1754 s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error, in complete_exception()
1765 struct dm_snapshot *s = pe->snap; in copy_callback() local
1769 if (pe->exception_sequence == s->exception_complete_sequence) { in copy_callback()
1772 s->exception_complete_sequence++; in copy_callback()
1775 next = rb_first(&s->out_of_order_tree); in copy_callback()
1779 if (pe->exception_sequence != s->exception_complete_sequence) in copy_callback()
1782 s->exception_complete_sequence++; in copy_callback()
1783 rb_erase(&pe->out_of_order_node, &s->out_of_order_tree); in copy_callback()
1789 struct rb_node **p = &s->out_of_order_tree.rb_node; in copy_callback()
1804 rb_insert_color(&pe->out_of_order_node, &s->out_of_order_tree); in copy_callback()
1806 account_end_copy(s); in copy_callback()
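copy_callback() (lines 1765-1806) commits exceptions strictly in allocation order: a completion that matches s->exception_complete_sequence is committed immediately and the rb-tree of earlier out-of-order arrivals is drained in sequence; anything else is parked in s->out_of_order_tree keyed by its sequence number. The parking step, condensed into a helper (defer_out_of_order() is a hypothetical name; the real code is inline):

static void defer_out_of_order(struct dm_snapshot *s,
			       struct dm_snap_pending_exception *pe)
{
	struct rb_node *parent = NULL;
	struct rb_node **p = &s->out_of_order_tree.rb_node;
	struct dm_snap_pending_exception *pe2;

	while (*p) {
		pe2 = rb_entry(*p, struct dm_snap_pending_exception,
			       out_of_order_node);
		parent = *p;

		BUG_ON(pe->exception_sequence == pe2->exception_sequence);
		if (pe->exception_sequence < pe2->exception_sequence)
			p = &((*p)->rb_left);
		else
			p = &((*p)->rb_right);
	}

	rb_link_node(&pe->out_of_order_node, parent, p);
	rb_insert_color(&pe->out_of_order_node, &s->out_of_order_tree);
}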
1814 struct dm_snapshot *s = pe->snap; in start_copy() local
1816 struct block_device *bdev = s->origin->bdev; in start_copy()
1822 src.sector = chunk_to_sector(s->store, pe->e.old_chunk); in start_copy()
1823 src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector); in start_copy()
1825 dest.bdev = s->cow->bdev; in start_copy()
1826 dest.sector = chunk_to_sector(s->store, pe->e.new_chunk); in start_copy()
1830 account_start_copy(s); in start_copy()
1831 dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe); in start_copy()
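start_copy() (lines 1814-1831) sets up the origin-to-COW copy for one pending exception, clamping the final chunk so the read never runs past the end of the origin device. Reconstruction:

static void start_copy(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct dm_io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
	/* A partial final chunk must not read past the origin's end. */
	src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);

	dest.bdev = s->cow->bdev;
	dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd; copy_callback() sequences the commit. */
	account_start_copy(s);
	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe);
}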
1844 struct dm_snapshot *s = pe->snap; in start_full_bio() local
1850 account_start_copy(s); in start_full_bio()
1851 callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client, in start_full_bio()
1861 __lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk) in __lookup_pending_exception() argument
1863 struct dm_exception *e = dm_lookup_exception(&s->pending, chunk); in __lookup_pending_exception()
1878 __insert_pending_exception(struct dm_snapshot *s, in __insert_pending_exception() argument
1887 spin_lock(&s->pe_allocation_lock); in __insert_pending_exception()
1888 if (s->store->type->prepare_exception(s->store, &pe->e)) { in __insert_pending_exception()
1889 spin_unlock(&s->pe_allocation_lock); in __insert_pending_exception()
1894 pe->exception_sequence = s->exception_start_sequence++; in __insert_pending_exception()
1895 spin_unlock(&s->pe_allocation_lock); in __insert_pending_exception()
1897 dm_insert_exception(&s->pending, &pe->e); in __insert_pending_exception()
1911 __find_pending_exception(struct dm_snapshot *s, in __find_pending_exception() argument
1916 pe2 = __lookup_pending_exception(s, chunk); in __find_pending_exception()
1922 return __insert_pending_exception(s, pe, chunk); in __find_pending_exception()
1925 static void remap_exception(struct dm_snapshot *s, struct dm_exception *e, in remap_exception() argument
1928 bio_set_dev(bio, s->cow->bdev); in remap_exception()
1930 chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) + in remap_exception()
1932 (bio->bi_iter.bi_sector & s->store->chunk_mask); in remap_exception()
1938 struct dm_snapshot *s = bio->bi_private; in zero_callback() local
1940 account_end_copy(s); in zero_callback()
1945 static void zero_exception(struct dm_snapshot *s, struct dm_exception *e, in zero_exception() argument
1950 dest.bdev = s->cow->bdev; in zero_exception()
1952 dest.count = s->store->chunk_size; in zero_exception()
1954 account_start_copy(s); in zero_exception()
1956 bio->bi_private = s; in zero_exception()
1957 dm_kcopyd_zero(s->kcopyd_client, 1, &dest, 0, zero_callback, bio); in zero_exception()
1960 static bool io_overlaps_chunk(struct dm_snapshot *s, struct bio *bio) in io_overlaps_chunk() argument
1963 (s->store->chunk_size << SECTOR_SHIFT); in io_overlaps_chunk()
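remap_exception() (lines 1925-1932) performs the actual redirection to the COW device: the new sector is the start of the allocated chunk plus the bio's offset within the chunk, taken with chunk_mask. Reconstruction, including the consecutive-chunk adjustment the matched lines elide (dm_chunk_number() strips the run-length that dm-snap packs into the top bits of new_chunk):

static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
			    struct bio *bio, chunk_t chunk)
{
	bio_set_dev(bio, s->cow->bdev);
	bio->bi_iter.bi_sector =
		chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
				(chunk - e->old_chunk)) +
		(bio->bi_iter.bi_sector & s->store->chunk_mask);
}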
1969 struct dm_snapshot *s = ti->private; in snapshot_map() local
1978 bio_set_dev(bio, s->cow->bdev); in snapshot_map()
1982 chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector); in snapshot_map()
1983 dm_exception_table_lock_init(s, chunk, &lock); in snapshot_map()
1987 if (!s->valid) in snapshot_map()
1991 while (unlikely(!wait_for_in_progress(s, false))) in snapshot_map()
1995 down_read(&s->lock); in snapshot_map()
1998 if (!s->valid || (unlikely(s->snapshot_overflowed) && in snapshot_map()
2005 if (s->discard_passdown_origin && dm_bio_get_target_bio_nr(bio)) { in snapshot_map()
2012 bio_set_dev(bio, s->origin->bdev); in snapshot_map()
2013 track_chunk(s, bio, chunk); in snapshot_map()
2020 e = dm_lookup_exception(&s->complete, chunk); in snapshot_map()
2022 remap_exception(s, e, bio, chunk); in snapshot_map()
2024 io_overlaps_chunk(s, bio)) { in snapshot_map()
2026 up_read(&s->lock); in snapshot_map()
2027 zero_exception(s, e, bio, chunk); in snapshot_map()
2050 pe = __lookup_pending_exception(s, chunk); in snapshot_map()
2053 pe = alloc_pending_exception(s); in snapshot_map()
2056 e = dm_lookup_exception(&s->complete, chunk); in snapshot_map()
2059 remap_exception(s, e, bio, chunk); in snapshot_map()
2063 pe = __find_pending_exception(s, pe, chunk); in snapshot_map()
2066 up_read(&s->lock); in snapshot_map()
2068 down_write(&s->lock); in snapshot_map()
2070 if (s->store->userspace_supports_overflow) { in snapshot_map()
2071 if (s->valid && !s->snapshot_overflowed) { in snapshot_map()
2072 s->snapshot_overflowed = 1; in snapshot_map()
2076 __invalidate_snapshot(s, -ENOMEM); in snapshot_map()
2077 up_write(&s->lock); in snapshot_map()
2084 remap_exception(s, &pe->e, bio, chunk); in snapshot_map()
2088 if (!pe->started && io_overlaps_chunk(s, bio)) { in snapshot_map()
2092 up_read(&s->lock); in snapshot_map()
2105 up_read(&s->lock); in snapshot_map()
2111 bio_set_dev(bio, s->origin->bdev); in snapshot_map()
2112 track_chunk(s, bio, chunk); in snapshot_map()
2117 up_read(&s->lock); in snapshot_map()
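The trickiest branch of snapshot_map() is a write that misses the completed-exception table: the pending exception is allocated outside any lock (line 2053), the tables are re-checked, and only then is COW space reserved. When the reservation fails, the outcome depends on whether userspace understands the "Overflow" status (lines 2068-2077). Condensed reconstruction of that failure branch; 'lock' is the dm_exception_table_lock initialised at line 1983, and the out label follows the surrounding function:

	pe = __find_pending_exception(s, pe, chunk);
	if (!pe) {
		dm_exception_table_unlock(&lock);
		up_read(&s->lock);

		down_write(&s->lock);

		if (s->store->userspace_supports_overflow) {
			if (s->valid && !s->snapshot_overflowed) {
				s->snapshot_overflowed = 1;
				DMERR("Snapshot overflowed: Unable to allocate exception.");
			}
		} else
			__invalidate_snapshot(s, -ENOMEM);
		up_write(&s->lock);

		r = DM_MAPIO_KILL;
		goto out;
	}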
2137 struct dm_snapshot *s = ti->private; in snapshot_merge_map() local
2145 bio_set_dev(bio, s->origin->bdev); in snapshot_merge_map()
2147 bio_set_dev(bio, s->cow->bdev); in snapshot_merge_map()
2157 chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector); in snapshot_merge_map()
2159 down_write(&s->lock); in snapshot_merge_map()
2162 if (!s->valid) in snapshot_merge_map()
2166 e = dm_lookup_exception(&s->complete, chunk); in snapshot_merge_map()
2170 chunk >= s->first_merging_chunk && in snapshot_merge_map()
2171 chunk < (s->first_merging_chunk + in snapshot_merge_map()
2172 s->num_merging_chunks)) { in snapshot_merge_map()
2173 bio_set_dev(bio, s->origin->bdev); in snapshot_merge_map()
2174 bio_list_add(&s->bios_queued_during_merge, bio); in snapshot_merge_map()
2179 remap_exception(s, e, bio, chunk); in snapshot_merge_map()
2182 track_chunk(s, bio, chunk); in snapshot_merge_map()
2187 bio_set_dev(bio, s->origin->bdev); in snapshot_merge_map()
2190 up_write(&s->lock); in snapshot_merge_map()
2191 return do_origin(s->origin, bio, false); in snapshot_merge_map()
2195 up_write(&s->lock); in snapshot_merge_map()
2203 struct dm_snapshot *s = ti->private; in snapshot_end_io() local
2206 stop_tracking_chunk(s, bio); in snapshot_end_io()
2213 struct dm_snapshot *s = ti->private; in snapshot_merge_presuspend() local
2215 stop_merge(s); in snapshot_merge_presuspend()
2221 struct dm_snapshot *s = ti->private; in snapshot_preresume() local
2225 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); in snapshot_preresume()
2228 if (s == snap_src) { in snapshot_preresume()
2246 struct dm_snapshot *s = ti->private; in snapshot_resume() local
2254 o = __lookup_dm_origin(s->origin->bdev); in snapshot_resume()
2258 (void) __find_snapshots_sharing_cow(s, NULL, NULL, &snap_merging); in snapshot_resume()
2281 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); in snapshot_resume()
2300 reregister_snapshot(s); in snapshot_resume()
2302 down_write(&s->lock); in snapshot_resume()
2303 s->active = 1; in snapshot_resume()
2304 up_write(&s->lock); in snapshot_resume()
2320 struct dm_snapshot *s = ti->private; in snapshot_merge_resume() local
2330 ti->max_io_len = get_origin_minimum_chunksize(s->origin->bdev); in snapshot_merge_resume()
2332 start_merge(s); in snapshot_merge_resume()
2576 struct dm_snapshot *s; in do_origin() local
2577 list_for_each_entry(s, &o->snapshots, list) in do_origin()
2578 if (unlikely(!wait_for_in_progress(s, true))) in do_origin()
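Finally, do_origin() (lines 2576-2578) throttles an origin write against every snapshot attached to the origin before triggering the COW work. Because wait_for_in_progress(s, true) drops _origins_lock while sleeping, the walk restarts from the top after every wait. Condensed reconstruction (__origin_write() is the helper that actually starts the exceptions):

static int do_origin(struct dm_dev *origin, struct bio *bio, bool limit)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

again:
	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o) {
		if (limit) {
			struct dm_snapshot *s;

			list_for_each_entry(s, &o->snapshots, list)
				if (unlikely(!wait_for_in_progress(s, true)))
					goto again;	/* lock was dropped */
		}

		r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
	}
	up_read(&_origins_lock);

	return r;
}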