Lines matching references to `s` in the device-mapper snapshot target (drivers/md/dm-snap.c, Linux kernel). Each entry gives the source line number, the matching code, and the enclosing function; `argument` and `local` indicate how `s` is bound in that function.
151 struct dm_dev *dm_snap_origin(struct dm_snapshot *s) in dm_snap_origin() argument
153 return s->origin; in dm_snap_origin()
157 struct dm_dev *dm_snap_cow(struct dm_snapshot *s) in dm_snap_cow() argument
159 return s->cow; in dm_snap_cow()
236 static void track_chunk(struct dm_snapshot *s, struct bio *bio, chunk_t chunk) in track_chunk() argument
242 spin_lock_irq(&s->tracked_chunk_lock); in track_chunk()
244 &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]); in track_chunk()
245 spin_unlock_irq(&s->tracked_chunk_lock); in track_chunk()
248 static void stop_tracking_chunk(struct dm_snapshot *s, struct bio *bio) in stop_tracking_chunk() argument
253 spin_lock_irqsave(&s->tracked_chunk_lock, flags); in stop_tracking_chunk()
255 spin_unlock_irqrestore(&s->tracked_chunk_lock, flags); in stop_tracking_chunk()
258 static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk) in __chunk_is_tracked() argument
263 spin_lock_irq(&s->tracked_chunk_lock); in __chunk_is_tracked()
266 &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) { in __chunk_is_tracked()
273 spin_unlock_irq(&s->tracked_chunk_lock); in __chunk_is_tracked()
282 static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk) in __check_for_conflicting_io() argument
284 while (__chunk_is_tracked(s, chunk)) in __check_for_conflicting_io()
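
The four functions above form a small in-flight I/O tracker: reads remapped to the origin are hashed by chunk number so that, before the merge or copy path overwrites a chunk, __check_for_conflicting_io() can wait for conflicting reads to drain. A minimal userspace sketch of the same scheme, assuming a pthread mutex in place of the kernel spinlock and usleep() in place of msleep(); all names here are illustrative, not the kernel API:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <unistd.h>

    #define TRACKED_HASH_SIZE 16                     /* power of two */
    #define TRACKED_HASH(c)   ((c) & (TRACKED_HASH_SIZE - 1))

    struct tracked_chunk {
        struct tracked_chunk *next;
        uint64_t chunk;
    };

    struct chunk_tracker {
        pthread_mutex_t lock;
        struct tracked_chunk *hash[TRACKED_HASH_SIZE];
    };

    static void track_chunk(struct chunk_tracker *t, struct tracked_chunk *c,
                            uint64_t chunk)
    {
        c->chunk = chunk;
        pthread_mutex_lock(&t->lock);
        c->next = t->hash[TRACKED_HASH(chunk)];       /* push onto bucket */
        t->hash[TRACKED_HASH(chunk)] = c;
        pthread_mutex_unlock(&t->lock);
    }

    static void stop_tracking_chunk(struct chunk_tracker *t,
                                    struct tracked_chunk *c)
    {
        struct tracked_chunk **pp;

        pthread_mutex_lock(&t->lock);
        for (pp = &t->hash[TRACKED_HASH(c->chunk)]; *pp; pp = &(*pp)->next) {
            if (*pp == c) {
                *pp = c->next;                        /* unlink */
                break;
            }
        }
        pthread_mutex_unlock(&t->lock);
    }

    static bool chunk_is_tracked(struct chunk_tracker *t, uint64_t chunk)
    {
        struct tracked_chunk *c;
        bool found = false;

        pthread_mutex_lock(&t->lock);
        for (c = t->hash[TRACKED_HASH(chunk)]; c; c = c->next) {
            if (c->chunk == chunk) {
                found = true;
                break;
            }
        }
        pthread_mutex_unlock(&t->lock);
        return found;
    }

    /* Block until no I/O is tracked against @chunk, as the merge path does. */
    static void wait_for_conflicting_io(struct chunk_tracker *t, uint64_t chunk)
    {
        while (chunk_is_tracked(t, chunk))
            usleep(1000);                             /* kernel uses msleep(1) */
    }
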
427 struct dm_snapshot *s; in __find_snapshots_sharing_cow() local
436 list_for_each_entry(s, &o->snapshots, list) { in __find_snapshots_sharing_cow()
437 if (dm_target_is_snapshot_merge(s->ti) && snap_merge) in __find_snapshots_sharing_cow()
438 *snap_merge = s; in __find_snapshots_sharing_cow()
439 if (!bdev_equal(s->cow->bdev, snap->cow->bdev)) in __find_snapshots_sharing_cow()
442 down_read(&s->lock); in __find_snapshots_sharing_cow()
443 active = s->active; in __find_snapshots_sharing_cow()
444 up_read(&s->lock); in __find_snapshots_sharing_cow()
448 *snap_src = s; in __find_snapshots_sharing_cow()
450 *snap_dest = s; in __find_snapshots_sharing_cow()
508 static void __insert_snapshot(struct origin *o, struct dm_snapshot *s) in __insert_snapshot() argument
514 if (l->store->chunk_size < s->store->chunk_size) in __insert_snapshot()
516 list_add_tail(&s->list, &l->list); in __insert_snapshot()
570 static void reregister_snapshot(struct dm_snapshot *s) in reregister_snapshot() argument
572 struct block_device *bdev = s->origin->bdev; in reregister_snapshot()
576 list_del(&s->list); in reregister_snapshot()
577 __insert_snapshot(__lookup_origin(bdev), s); in reregister_snapshot()
582 static void unregister_snapshot(struct dm_snapshot *s) in unregister_snapshot() argument
587 o = __lookup_origin(s->origin->bdev); in unregister_snapshot()
589 list_del(&s->list); in unregister_snapshot()
683 static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s) in alloc_pending_exception() argument
685 struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool, in alloc_pending_exception()
688 atomic_inc(&s->pending_exceptions_count); in alloc_pending_exception()
689 pe->snap = s; in alloc_pending_exception()
696 struct dm_snapshot *s = pe->snap; in free_pending_exception() local
698 mempool_free(pe, s->pending_pool); in free_pending_exception()
700 atomic_dec(&s->pending_exceptions_count); in free_pending_exception()
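
alloc_pending_exception() and free_pending_exception() pair a mempool with an atomic counter: every allocation bumps pending_exceptions_count, every free drops it after the memory is returned, so the destructor can spin until the count reaches zero before tearing the pool down (see snapshot_dtr below). A sketch of that accounting, with C11 atomics and malloc standing in for atomic_t and the mempool:

    #include <sched.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    static atomic_int pending_exceptions_count;

    struct pending_exception { int dummy; };

    static struct pending_exception *alloc_pending_exception(void)
    {
        struct pending_exception *pe = malloc(sizeof(*pe));

        if (pe)
            atomic_fetch_add(&pending_exceptions_count, 1);
        return pe;
    }

    static void free_pending_exception(struct pending_exception *pe)
    {
        free(pe);                      /* free first, then drop the count, */
        atomic_fetch_sub(&pending_exceptions_count, 1); /* so a zero count
                                          guarantees the memory is back */
    }

    /* snapshot_dtr() waits like this before destroying the mempool. */
    static void drain_pending_exceptions(void)
    {
        while (atomic_load(&pending_exceptions_count))
            sched_yield();             /* kernel uses msleep(1) */
    }
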
751 struct dm_snapshot *s = context; in dm_add_exception() local
763 dm_insert_exception(&s->complete, e); in dm_add_exception()
800 static int init_hash_tables(struct dm_snapshot *s) in init_hash_tables() argument
808 cow_dev_size = get_dev_size(s->cow->bdev); in init_hash_tables()
811 hash_size = cow_dev_size >> s->store->chunk_shift; in init_hash_tables()
817 if (dm_exception_table_init(&s->complete, hash_size, in init_hash_tables()
829 if (dm_exception_table_init(&s->pending, hash_size, 0)) { in init_hash_tables()
830 dm_exception_table_exit(&s->complete, exception_cache); in init_hash_tables()
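
init_hash_tables() sizes the completed-exception table from the COW device: roughly one bucket per chunk, clamped to a sane range and rounded down to a power of two so lookups can mask instead of divide. A standalone sketch of that sizing arithmetic; MIN_BUCKETS and the max_buckets parameter are illustrative stand-ins for the kernel's values:

    #include <stdint.h>

    #define MIN_BUCKETS 64

    static uint64_t rounddown_pow_of_two(uint64_t n)
    {
        uint64_t p = 1;

        while (p * 2 <= n)
            p *= 2;
        return p;
    }

    static uint64_t calc_hash_size(uint64_t cow_dev_sectors,
                                   unsigned int chunk_shift,
                                   uint64_t max_buckets)
    {
        /* One bucket per COW chunk... */
        uint64_t hash_size = cow_dev_sectors >> chunk_shift;

        /* ...capped above and below... */
        if (hash_size > max_buckets)
            hash_size = max_buckets;
        if (hash_size < MIN_BUCKETS)
            hash_size = MIN_BUCKETS;

        /* ...and a power of two, so the hash can use a cheap mask. */
        return rounddown_pow_of_two(hash_size);
    }
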
837 static void merge_shutdown(struct dm_snapshot *s) in merge_shutdown() argument
839 clear_bit_unlock(RUNNING_MERGE, &s->state_bits); in merge_shutdown()
841 wake_up_bit(&s->state_bits, RUNNING_MERGE); in merge_shutdown()
844 static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s) in __release_queued_bios_after_merge() argument
846 s->first_merging_chunk = 0; in __release_queued_bios_after_merge()
847 s->num_merging_chunks = 0; in __release_queued_bios_after_merge()
849 return bio_list_get(&s->bios_queued_during_merge); in __release_queued_bios_after_merge()
855 static int __remove_single_exception_chunk(struct dm_snapshot *s, in __remove_single_exception_chunk() argument
860 e = dm_lookup_exception(&s->complete, old_chunk); in __remove_single_exception_chunk()
906 static int remove_single_exception_chunk(struct dm_snapshot *s) in remove_single_exception_chunk() argument
910 chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1; in remove_single_exception_chunk()
912 down_write(&s->lock); in remove_single_exception_chunk()
919 r = __remove_single_exception_chunk(s, old_chunk); in remove_single_exception_chunk()
922 } while (old_chunk-- > s->first_merging_chunk); in remove_single_exception_chunk()
924 b = __release_queued_bios_after_merge(s); in remove_single_exception_chunk()
927 up_write(&s->lock); in remove_single_exception_chunk()
960 static void snapshot_merge_next_chunks(struct dm_snapshot *s) in snapshot_merge_next_chunks() argument
968 BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits)); in snapshot_merge_next_chunks()
969 if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits))) in snapshot_merge_next_chunks()
975 if (!s->valid) { in snapshot_merge_next_chunks()
980 linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk, in snapshot_merge_next_chunks()
986 down_write(&s->lock); in snapshot_merge_next_chunks()
987 s->merge_failed = 1; in snapshot_merge_next_chunks()
988 up_write(&s->lock); in snapshot_merge_next_chunks()
1001 io_size = linear_chunks * s->store->chunk_size; in snapshot_merge_next_chunks()
1003 dest.bdev = s->origin->bdev; in snapshot_merge_next_chunks()
1004 dest.sector = chunk_to_sector(s->store, old_chunk); in snapshot_merge_next_chunks()
1007 src.bdev = s->cow->bdev; in snapshot_merge_next_chunks()
1008 src.sector = chunk_to_sector(s->store, new_chunk); in snapshot_merge_next_chunks()
1021 while (origin_write_extent(s, dest.sector, io_size)) { in snapshot_merge_next_chunks()
1029 down_write(&s->lock); in snapshot_merge_next_chunks()
1030 s->first_merging_chunk = old_chunk; in snapshot_merge_next_chunks()
1031 s->num_merging_chunks = linear_chunks; in snapshot_merge_next_chunks()
1032 up_write(&s->lock); in snapshot_merge_next_chunks()
1036 __check_for_conflicting_io(s, old_chunk + i); in snapshot_merge_next_chunks()
1038 dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s); in snapshot_merge_next_chunks()
1042 merge_shutdown(s); in snapshot_merge_next_chunks()
1049 struct dm_snapshot *s = context; in merge_callback() local
1060 if (s->store->type->commit_merge(s->store, in merge_callback()
1061 s->num_merging_chunks) < 0) { in merge_callback()
1066 if (remove_single_exception_chunk(s) < 0) in merge_callback()
1069 snapshot_merge_next_chunks(s); in merge_callback()
1074 down_write(&s->lock); in merge_callback()
1075 s->merge_failed = 1; in merge_callback()
1076 b = __release_queued_bios_after_merge(s); in merge_callback()
1077 up_write(&s->lock); in merge_callback()
1080 merge_shutdown(s); in merge_callback()
1083 static void start_merge(struct dm_snapshot *s) in start_merge() argument
1085 if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits)) in start_merge()
1086 snapshot_merge_next_chunks(s); in start_merge()
1092 static void stop_merge(struct dm_snapshot *s) in stop_merge() argument
1094 set_bit(SHUTDOWN_MERGE, &s->state_bits); in stop_merge()
1095 wait_on_bit(&s->state_bits, RUNNING_MERGE, TASK_UNINTERRUPTIBLE); in stop_merge()
1096 clear_bit(SHUTDOWN_MERGE, &s->state_bits); in stop_merge()
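
start_merge()/stop_merge() coordinate through two state bits: start_merge() only kicks off work if it wins the test-and-set on RUNNING_MERGE, the merge loop polls SHUTDOWN_MERGE between batches, and stop_merge() raises SHUTDOWN_MERGE then sleeps until RUNNING_MERGE clears. A userspace sketch of the handshake, with C11 atomics standing in for the kernel bitops and a condition variable for wait_on_bit()/wake_up_bit(); names are illustrative:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    struct merge_state {
        atomic_bool running;          /* RUNNING_MERGE  */
        atomic_bool shutdown;         /* SHUTDOWN_MERGE */
        pthread_mutex_t lock;
        pthread_cond_t done;
    };

    static void merge_next_chunks(struct merge_state *m);

    static void start_merge(struct merge_state *m)
    {
        /* Only one merge runs at a time: first caller to flip the bit wins. */
        if (!atomic_exchange(&m->running, true))
            merge_next_chunks(m);
    }

    static void merge_shutdown(struct merge_state *m)
    {
        pthread_mutex_lock(&m->lock);
        atomic_store(&m->running, false);   /* clear_bit_unlock() */
        pthread_cond_broadcast(&m->done);   /* wake_up_bit()      */
        pthread_mutex_unlock(&m->lock);
    }

    static void stop_merge(struct merge_state *m)
    {
        atomic_store(&m->shutdown, true);   /* ask the merge loop to stop */
        pthread_mutex_lock(&m->lock);
        while (atomic_load(&m->running))    /* wait_on_bit(RUNNING_MERGE) */
            pthread_cond_wait(&m->done, &m->lock);
        pthread_mutex_unlock(&m->lock);
        atomic_store(&m->shutdown, false);
    }

    /* The merge loop checks the shutdown bit between batches, as
     * snapshot_merge_next_chunks() does, and signals completion on exit. */
    static void merge_next_chunks(struct merge_state *m)
    {
        while (!atomic_load(&m->shutdown)) {
            /* ... copy one batch of chunks back to the origin ... */
            break;                          /* placeholder: one batch */
        }
        merge_shutdown(m);
    }
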
1104 struct dm_snapshot *s; in snapshot_ctr() local
1123 s = kmalloc(sizeof(*s), GFP_KERNEL); in snapshot_ctr()
1124 if (!s) { in snapshot_ctr()
1134 r = dm_get_device(ti, origin_path, origin_mode, &s->origin); in snapshot_ctr()
1139 origin_dev = s->origin->bdev->bd_dev; in snapshot_ctr()
1152 r = dm_get_device(ti, cow_path, dm_table_get_mode(ti->table), &s->cow); in snapshot_ctr()
1158 r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store); in snapshot_ctr()
1168 s->ti = ti; in snapshot_ctr()
1169 s->valid = 1; in snapshot_ctr()
1170 s->snapshot_overflowed = 0; in snapshot_ctr()
1171 s->active = 0; in snapshot_ctr()
1172 atomic_set(&s->pending_exceptions_count, 0); in snapshot_ctr()
1173 s->exception_start_sequence = 0; in snapshot_ctr()
1174 s->exception_complete_sequence = 0; in snapshot_ctr()
1175 INIT_LIST_HEAD(&s->out_of_order_list); in snapshot_ctr()
1176 init_rwsem(&s->lock); in snapshot_ctr()
1177 INIT_LIST_HEAD(&s->list); in snapshot_ctr()
1178 spin_lock_init(&s->pe_lock); in snapshot_ctr()
1179 s->state_bits = 0; in snapshot_ctr()
1180 s->merge_failed = 0; in snapshot_ctr()
1181 s->first_merging_chunk = 0; in snapshot_ctr()
1182 s->num_merging_chunks = 0; in snapshot_ctr()
1183 bio_list_init(&s->bios_queued_during_merge); in snapshot_ctr()
1186 if (init_hash_tables(s)) { in snapshot_ctr()
1192 s->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle); in snapshot_ctr()
1193 if (IS_ERR(s->kcopyd_client)) { in snapshot_ctr()
1194 r = PTR_ERR(s->kcopyd_client); in snapshot_ctr()
1199 s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache); in snapshot_ctr()
1200 if (!s->pending_pool) { in snapshot_ctr()
1207 INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]); in snapshot_ctr()
1209 spin_lock_init(&s->tracked_chunk_lock); in snapshot_ctr()
1211 ti->private = s; in snapshot_ctr()
1217 r = register_snapshot(s); in snapshot_ctr()
1233 s->store->chunk_size = 0; in snapshot_ctr()
1237 r = s->store->type->read_metadata(s->store, dm_add_exception, in snapshot_ctr()
1238 (void *)s); in snapshot_ctr()
1243 s->valid = 0; in snapshot_ctr()
1247 if (!s->store->chunk_size) { in snapshot_ctr()
1252 r = dm_set_target_max_io_len(ti, s->store->chunk_size); in snapshot_ctr()
1259 unregister_snapshot(s); in snapshot_ctr()
1262 mempool_destroy(s->pending_pool); in snapshot_ctr()
1265 dm_kcopyd_client_destroy(s->kcopyd_client); in snapshot_ctr()
1268 dm_exception_table_exit(&s->pending, pending_cache); in snapshot_ctr()
1269 dm_exception_table_exit(&s->complete, exception_cache); in snapshot_ctr()
1272 dm_exception_store_destroy(s->store); in snapshot_ctr()
1275 dm_put_device(ti, s->cow); in snapshot_ctr()
1278 dm_put_device(ti, s->origin); in snapshot_ctr()
1281 kfree(s); in snapshot_ctr()
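
The constructor's error path (lines 1259-1281 above) is the kernel's goto-unwind idiom: resources are acquired in a fixed order, and each failure jumps to a label that releases everything acquired so far, in reverse. A generic sketch of the idiom, with illustrative resource names and malloc() standing in for dm_get_device() and friends:

    #include <stdlib.h>

    struct snap { void *origin, *cow, *store, *pool; };

    static void *acquire(const char *what) { (void)what; return malloc(16); }
    static void release(void *r) { free(r); }

    static struct snap *snap_create(void)
    {
        struct snap *s = malloc(sizeof(*s));

        if (!s)
            goto bad;
        if (!(s->origin = acquire("origin")))
            goto bad_origin;
        if (!(s->cow = acquire("cow")))
            goto bad_cow;
        if (!(s->store = acquire("store")))
            goto bad_store;
        if (!(s->pool = acquire("pending pool")))
            goto bad_pool;
        return s;

    bad_pool:                     /* unwind in strict reverse order */
        release(s->store);
    bad_store:
        release(s->cow);
    bad_cow:
        release(s->origin);
    bad_origin:
        free(s);
    bad:
        return NULL;
    }
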
1287 static void __free_exceptions(struct dm_snapshot *s) in __free_exceptions() argument
1289 dm_kcopyd_client_destroy(s->kcopyd_client); in __free_exceptions()
1290 s->kcopyd_client = NULL; in __free_exceptions()
1292 dm_exception_table_exit(&s->pending, pending_cache); in __free_exceptions()
1293 dm_exception_table_exit(&s->complete, exception_cache); in __free_exceptions()
1334 struct dm_snapshot *s = ti->private; in snapshot_dtr() local
1339 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); in snapshot_dtr()
1340 if (snap_src && snap_dest && (s == snap_src)) { in snapshot_dtr()
1349 stop_merge(s); in snapshot_dtr()
1353 unregister_snapshot(s); in snapshot_dtr()
1355 while (atomic_read(&s->pending_exceptions_count)) in snapshot_dtr()
1365 BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i])); in snapshot_dtr()
1368 __free_exceptions(s); in snapshot_dtr()
1370 mempool_destroy(s->pending_pool); in snapshot_dtr()
1372 dm_exception_store_destroy(s->store); in snapshot_dtr()
1374 dm_put_device(ti, s->cow); in snapshot_dtr()
1376 dm_put_device(ti, s->origin); in snapshot_dtr()
1378 kfree(s); in snapshot_dtr()
1401 static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio) in retry_origin_bios() argument
1409 r = do_origin(s->origin, bio); in retry_origin_bios()
1431 static void __invalidate_snapshot(struct dm_snapshot *s, int err) in __invalidate_snapshot() argument
1433 if (!s->valid) in __invalidate_snapshot()
1441 if (s->store->type->drop_snapshot) in __invalidate_snapshot()
1442 s->store->type->drop_snapshot(s->store); in __invalidate_snapshot()
1444 s->valid = 0; in __invalidate_snapshot()
1446 dm_table_event(s->ti->table); in __invalidate_snapshot()
1453 struct dm_snapshot *s = pe->snap; in pending_complete() local
1461 down_write(&s->lock); in pending_complete()
1462 __invalidate_snapshot(s, -EIO); in pending_complete()
1469 down_write(&s->lock); in pending_complete()
1470 __invalidate_snapshot(s, -ENOMEM); in pending_complete()
1476 down_write(&s->lock); in pending_complete()
1477 if (!s->valid) { in pending_complete()
1484 __check_for_conflicting_io(s, pe->e.old_chunk); in pending_complete()
1490 dm_insert_exception(&s->complete, e); in pending_complete()
1501 up_write(&s->lock); in pending_complete()
1514 retry_origin_bios(s, origin_bios); in pending_complete()
1521 struct dm_snapshot *s = pe->snap; in complete_exception() local
1524 s->store->type->commit_exception(s->store, &pe->e, !pe->copy_error, in complete_exception()
1535 struct dm_snapshot *s = pe->snap; in copy_callback() local
1539 if (pe->exception_sequence == s->exception_complete_sequence) { in copy_callback()
1540 s->exception_complete_sequence++; in copy_callback()
1543 while (!list_empty(&s->out_of_order_list)) { in copy_callback()
1544 pe = list_entry(s->out_of_order_list.next, in copy_callback()
1546 if (pe->exception_sequence != s->exception_complete_sequence) in copy_callback()
1548 s->exception_complete_sequence++; in copy_callback()
1556 list_for_each_prev(lh, &s->out_of_order_list) { in copy_callback()
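
copy_callback() enforces in-order commits: kcopyd completes copies in arbitrary order, but each pending exception carries an exception_sequence, and only the one matching exception_complete_sequence may commit; stragglers park on out_of_order_list, kept sorted by sequence (scanned from the tail, since completions arrive nearly in order) and drained whenever the expected number shows up. A sketch of that sequencer with a plain sorted singly linked list in place of the kernel list macros; names are illustrative:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct pending {
        struct pending *next;
        uint64_t seq;                  /* exception_sequence */
    };

    struct sequencer {
        uint64_t next_complete;        /* exception_complete_sequence */
        struct pending *out_of_order;  /* sorted by seq, ascending */
    };

    static void commit(struct pending *pe)
    {
        printf("commit seq %llu\n", (unsigned long long)pe->seq);
        free(pe);
    }

    static void copy_done(struct sequencer *s, struct pending *pe)
    {
        if (pe->seq == s->next_complete) {
            s->next_complete++;
            commit(pe);
            /* Drain any parked completions that are now in order. */
            while (s->out_of_order &&
                   s->out_of_order->seq == s->next_complete) {
                struct pending *head = s->out_of_order;

                s->out_of_order = head->next;
                s->next_complete++;
                commit(head);
            }
            return;
        }

        /* Out of order: park it at its sorted position. */
        struct pending **pp = &s->out_of_order;

        while (*pp && (*pp)->seq < pe->seq)
            pp = &(*pp)->next;
        pe->next = *pp;
        *pp = pe;
    }
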
1570 struct dm_snapshot *s = pe->snap; in start_copy() local
1572 struct block_device *bdev = s->origin->bdev; in start_copy()
1578 src.sector = chunk_to_sector(s->store, pe->e.old_chunk); in start_copy()
1579 src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector); in start_copy()
1581 dest.bdev = s->cow->bdev; in start_copy()
1582 dest.sector = chunk_to_sector(s->store, pe->e.new_chunk); in start_copy()
1586 dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, copy_callback, pe); in start_copy()
1599 struct dm_snapshot *s = pe->snap; in start_full_bio() local
1605 callback_data = dm_kcopyd_prepare_callback(s->kcopyd_client, in start_full_bio()
1615 __lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk) in __lookup_pending_exception() argument
1617 struct dm_exception *e = dm_lookup_exception(&s->pending, chunk); in __lookup_pending_exception()
1634 __find_pending_exception(struct dm_snapshot *s, in __find_pending_exception() argument
1639 pe2 = __lookup_pending_exception(s, chunk); in __find_pending_exception()
1651 if (s->store->type->prepare_exception(s->store, &pe->e)) { in __find_pending_exception()
1656 pe->exception_sequence = s->exception_start_sequence++; in __find_pending_exception()
1658 dm_insert_exception(&s->pending, &pe->e); in __find_pending_exception()
1663 static void remap_exception(struct dm_snapshot *s, struct dm_exception *e, in remap_exception() argument
1666 bio->bi_bdev = s->cow->bdev; in remap_exception()
1668 chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) + in remap_exception()
1670 (bio->bi_iter.bi_sector & s->store->chunk_mask); in remap_exception()
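
The remap arithmetic in remap_exception(), stated standalone: with a power-of-two chunk size, chunk_mask = chunk_size_in_sectors - 1, so the bio's offset within its chunk is a cheap AND, added to the start of the exception's replacement chunk on the COW device. This sketch omits the consecutive-chunk range handling (dm_chunk_number() and the chunk - old_chunk offset) that the kernel line also performs; the function name is illustrative:

    #include <stdint.h>

    typedef uint64_t sector_t;
    typedef uint64_t chunk_t;

    static sector_t remap_sector(sector_t bi_sector, chunk_t new_chunk,
                                 unsigned int chunk_shift, sector_t chunk_mask)
    {
        sector_t chunk_start = new_chunk << chunk_shift; /* chunk_to_sector() */

        return chunk_start + (bi_sector & chunk_mask);   /* keep in-chunk offset */
    }
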
1676 struct dm_snapshot *s = ti->private; in snapshot_map() local
1684 bio->bi_bdev = s->cow->bdev; in snapshot_map()
1688 chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector); in snapshot_map()
1692 if (!s->valid) in snapshot_map()
1697 down_write(&s->lock); in snapshot_map()
1699 if (!s->valid || (unlikely(s->snapshot_overflowed) && in snapshot_map()
1706 e = dm_lookup_exception(&s->complete, chunk); in snapshot_map()
1708 remap_exception(s, e, bio, chunk); in snapshot_map()
1718 pe = __lookup_pending_exception(s, chunk); in snapshot_map()
1720 up_write(&s->lock); in snapshot_map()
1721 pe = alloc_pending_exception(s); in snapshot_map()
1722 down_write(&s->lock); in snapshot_map()
1724 if (!s->valid || s->snapshot_overflowed) { in snapshot_map()
1730 e = dm_lookup_exception(&s->complete, chunk); in snapshot_map()
1733 remap_exception(s, e, bio, chunk); in snapshot_map()
1737 pe = __find_pending_exception(s, pe, chunk); in snapshot_map()
1739 if (s->store->userspace_supports_overflow) { in snapshot_map()
1740 s->snapshot_overflowed = 1; in snapshot_map()
1743 __invalidate_snapshot(s, -ENOMEM); in snapshot_map()
1749 remap_exception(s, &pe->e, bio, chunk); in snapshot_map()
1755 (s->store->chunk_size << SECTOR_SHIFT)) { in snapshot_map()
1757 up_write(&s->lock); in snapshot_map()
1767 up_write(&s->lock); in snapshot_map()
1772 bio->bi_bdev = s->origin->bdev; in snapshot_map()
1773 track_chunk(s, bio, chunk); in snapshot_map()
1777 up_write(&s->lock); in snapshot_map()
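
Lines 1718-1737 above show snapshot_map()'s drop-lock/allocate/relock/recheck pattern: allocating a pending exception may sleep, so it cannot happen under s->lock; after the lock is retaken, every earlier check (validity, completed exception, racing pending exception) must be repeated, and losing the race just means freeing our allocation. A simplified userspace sketch, assuming the caller holds the lock on entry and exit; the pending table is reduced to a linked list and all names are illustrative:

    #include <pthread.h>
    #include <stdlib.h>

    struct pending {
        struct pending *next;
        long chunk;
    };

    static pthread_mutex_t snap_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct pending *pending_list;  /* stand-in for the pending table */

    static struct pending *lookup_pending(long chunk) /* caller holds snap_lock */
    {
        struct pending *pe;

        for (pe = pending_list; pe; pe = pe->next)
            if (pe->chunk == chunk)
                return pe;
        return NULL;
    }

    /* Called and returns with snap_lock held, but drops it to allocate. */
    static struct pending *find_or_create_pending(long chunk)
    {
        struct pending *pe, *pe2;

        pe = lookup_pending(chunk);
        if (pe)
            return pe;                     /* copy already in flight */

        pthread_mutex_unlock(&snap_lock);  /* allocation may sleep */
        pe = malloc(sizeof(*pe));
        pthread_mutex_lock(&snap_lock);

        if (!pe)
            return NULL;

        pe2 = lookup_pending(chunk);       /* world may have changed */
        if (pe2) {
            free(pe);                      /* we lost the race */
            return pe2;
        }

        pe->chunk = chunk;
        pe->next = pending_list;           /* publish under the lock */
        pending_list = pe;
        return pe;
    }
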
1797 struct dm_snapshot *s = ti->private; in snapshot_merge_map() local
1805 bio->bi_bdev = s->origin->bdev; in snapshot_merge_map()
1807 bio->bi_bdev = s->cow->bdev; in snapshot_merge_map()
1811 chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector); in snapshot_merge_map()
1813 down_write(&s->lock); in snapshot_merge_map()
1816 if (!s->valid) in snapshot_merge_map()
1820 e = dm_lookup_exception(&s->complete, chunk); in snapshot_merge_map()
1824 chunk >= s->first_merging_chunk && in snapshot_merge_map()
1825 chunk < (s->first_merging_chunk + in snapshot_merge_map()
1826 s->num_merging_chunks)) { in snapshot_merge_map()
1827 bio->bi_bdev = s->origin->bdev; in snapshot_merge_map()
1828 bio_list_add(&s->bios_queued_during_merge, bio); in snapshot_merge_map()
1833 remap_exception(s, e, bio, chunk); in snapshot_merge_map()
1836 track_chunk(s, bio, chunk); in snapshot_merge_map()
1841 bio->bi_bdev = s->origin->bdev; in snapshot_merge_map()
1844 up_write(&s->lock); in snapshot_merge_map()
1845 return do_origin(s->origin, bio); in snapshot_merge_map()
1849 up_write(&s->lock); in snapshot_merge_map()
1856 struct dm_snapshot *s = ti->private; in snapshot_end_io() local
1859 stop_tracking_chunk(s, bio); in snapshot_end_io()
1866 struct dm_snapshot *s = ti->private; in snapshot_merge_presuspend() local
1868 stop_merge(s); in snapshot_merge_presuspend()
1874 struct dm_snapshot *s = ti->private; in snapshot_preresume() local
1878 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); in snapshot_preresume()
1881 if (s == snap_src) { in snapshot_preresume()
1899 struct dm_snapshot *s = ti->private; in snapshot_resume() local
1907 o = __lookup_dm_origin(s->origin->bdev); in snapshot_resume()
1911 (void) __find_snapshots_sharing_cow(s, NULL, NULL, &snap_merging); in snapshot_resume()
1934 (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); in snapshot_resume()
1953 reregister_snapshot(s); in snapshot_resume()
1955 down_write(&s->lock); in snapshot_resume()
1956 s->active = 1; in snapshot_resume()
1957 up_write(&s->lock); in snapshot_resume()
1973 struct dm_snapshot *s = ti->private; in snapshot_merge_resume() local
1983 ti->max_io_len = get_origin_minimum_chunksize(s->origin->bdev); in snapshot_merge_resume()
1985 start_merge(s); in snapshot_merge_resume()