Lines matching "cell" and "count" in drivers/md/dm-thin.c
/*
 * Copyright (C) 2011-2012 Red Hat UK.
 */

#include "dm-thin-metadata.h"
#include "dm-bio-prison-v1.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>

#define MAX_DEV_ID ((1 << 24) - 1)

/*
 * We use a standard copy-on-write btree to store the mappings for the
 * devices (note I'm talking about copy-on-write of the metadata here, not
 * the data).
 * ...
 * - The origin mapping will point to the old origin block (the shared
 *   one).
 * ...
 * - The snap mapping still points to the old block.  As it would after
 *   the commit.
 */
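/*
 * A concrete illustration of the scheme above (values are illustrative,
 * not from the code): suppose a thin volume and its snapshot both map
 * virtual block 5 to shared data block 17.  A write to the thin volume
 * allocates a fresh data block, say 23, copies block 17 into it, and
 * inserts the 5 -> 23 mapping into the origin's btree only; the snapshot
 * keeps its 5 -> 17 mapping, exactly as it would after a commit.
 */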
/*----------------------------------------------------------------*/

/* build_key() */
        key->virtual = (ls == VIRTUAL);
        key->dev = dm_thin_dev_id(td);
        key->block_begin = b;
        key->block_end = e;
/*----------------------------------------------------------------*/

/* throttle_init() */
        init_rwsem(&t->lock);
        t->throttle_applied = false;

/* throttle_work_start() */
        t->threshold = jiffies + THROTTLE_THRESHOLD;

/* throttle_work_update() */
        if (!t->throttle_applied && jiffies > t->threshold) {
                down_write(&t->lock);
                t->throttle_applied = true;

/* throttle_work_complete() */
        if (t->throttle_applied) {
                t->throttle_applied = false;
                up_write(&t->lock);

/* throttle_lock() */
        down_read(&t->lock);

/* throttle_unlock() */
        up_read(&t->lock);
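/*
 * The throttle is built on a rw_semaphore: bio submitters take it for
 * read (throttle_lock()/throttle_unlock()), while the worker, once it has
 * been running for longer than THROTTLE_THRESHOLD jiffies, takes it for
 * write (throttle_work_update()).  That blocks new submissions until the
 * pass finishes (throttle_work_complete()), letting the worker catch up.
 */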
/*----------------------------------------------------------------*/

typedef void (*process_cell_fn)(struct thin_c *tc, struct dm_bio_prison_cell *cell);

/* get_pool_mode() */
        return pool->pf.mode;

/* notify_of_pool_mode_change() */
                "out-of-data-space",
                "read-only",    /* PM_OUT_OF_METADATA_SPACE */
                "read-only",    /* PM_READ_ONLY */

        if (!pool->pf.error_if_no_space)

        dm_table_event(pool->ti->table);
                dm_device_name(pool->pool_md),

/*----------------------------------------------------------------*/

/* block_size_is_power_of_two() */
        return pool->sectors_per_block_shift >= 0;

/* block_to_sectors() */
        return block_size_is_power_of_two(pool) ?
                (b << pool->sectors_per_block_shift) :
                (b * pool->sectors_per_block);
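/*
 * The pool's block size need not be a power of two.  When it is,
 * sectors_per_block_shift holds log2 of the block size and conversions
 * use a shift; otherwise the shift is stored as -1 and the code falls
 * back to a multiply or sector_div().  The same pattern recurs in
 * get_bio_block(), get_bio_block_range() and remap() below.
 */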
/*----------------------------------------------------------------*/

/* begin_discard() */
        op->tc = tc;
        blk_start_plug(&op->plug);
        op->parent_bio = parent;
        op->bio = NULL;

/* issue_discard() */
        struct thin_c *tc = op->tc;
        sector_t s = block_to_sectors(tc->pool, data_b);
        sector_t len = block_to_sectors(tc->pool, data_e - data_b);

        return __blkdev_issue_discard(tc->pool_dev->bdev, s, len,
                                      GFP_NOWAIT, 0, &op->bio);

/* end_discard() */
        if (op->bio) {
                bio_chain(op->bio, op->parent_bio);
                bio_set_op_attrs(op->bio, REQ_OP_DISCARD, 0);
                submit_bio(op->bio);
        }

        blk_finish_plug(&op->plug);

        if (r && !op->parent_bio->bi_status)
                op->parent_bio->bi_status = errno_to_blk_status(r);
        bio_endio(op->parent_bio);
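/*
 * The discards for a range are issued under a block plug and chained to
 * the parent bio with bio_chain(), so the parent completes only after
 * every chained discard has completed; any submission error is propagated
 * into the parent's bi_status before bio_endio().
 */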
/*----------------------------------------------------------------*/

/* wake_worker() */
        queue_work(pool->wq, &pool->worker);

/*----------------------------------------------------------------*/

/* bio_detain() */
        /*
         * Allocate a cell from the prison's mempool.
         */
        cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);

        r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
        if (r)
                /*
                 * We reused an old cell; we can get rid of the new one.
                 */
                dm_bio_prison_free_cell(pool->prison, cell_prealloc);

/* cell_release() */
                         struct dm_bio_prison_cell *cell,
        dm_cell_release(pool->prison, cell, bios);
        dm_bio_prison_free_cell(pool->prison, cell);

/* cell_visit_release() */
                               struct dm_bio_prison_cell *cell)
        dm_cell_visit_release(pool->prison, fn, context, cell);
        dm_bio_prison_free_cell(pool->prison, cell);

/* cell_release_no_holder() */
                                   struct dm_bio_prison_cell *cell,
        dm_cell_release_no_holder(pool->prison, cell, bios);
        dm_bio_prison_free_cell(pool->prison, cell);

/* cell_error_with_code() */
                                 struct dm_bio_prison_cell *cell, blk_status_t error_code)
        dm_cell_error(pool->prison, cell, error_code);
        dm_bio_prison_free_cell(pool->prison, cell);

/* get_pool_io_error_code() */
        return pool->out_of_data_space ? BLK_STS_NOSPC : BLK_STS_IOERR;

static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
        cell_error_with_code(pool, cell, get_pool_io_error_code(pool));

static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
        cell_error_with_code(pool, cell, 0);

static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell)
        cell_error_with_code(pool, cell, BLK_STS_DM_REQUEUE);

/*----------------------------------------------------------------*/

/* __pool_table_insert() */
        list_add(&pool->list, &dm_thin_pool_table.pools);

/* __pool_table_remove() */
        list_del(&pool->list);

/* __pool_table_lookup() */
                if (tmp->pool_md == md) {

/* __pool_table_lookup_metadata_dev() */
                if (tmp->md_dev == md_dev) {

/*----------------------------------------------------------------*/

/* struct dm_thin_endio_hook */
        struct dm_bio_prison_cell *cell;

/* error_bio_list() */
                bio->bi_status = error;

/* error_thin_bio_list() */
        spin_lock_irq(&tc->lock);
        spin_unlock_irq(&tc->lock);

/* requeue_deferred_cells() */
        struct pool *pool = tc->pool;
        struct dm_bio_prison_cell *cell, *tmp;

        spin_lock_irq(&tc->lock);
        list_splice_init(&tc->deferred_cells, &cells);
        spin_unlock_irq(&tc->lock);

        list_for_each_entry_safe(cell, tmp, &cells, user_list)
                cell_requeue(pool, cell);

/* requeue_io() */
        spin_lock_irq(&tc->lock);
        __merge_bio_list(&bios, &tc->deferred_bio_list);
        __merge_bio_list(&bios, &tc->retry_on_resume_list);
        spin_unlock_irq(&tc->lock);

/* error_retry_list_with_code() */
        list_for_each_entry_rcu(tc, &pool->active_thins, list)
                error_thin_bio_list(tc, &tc->retry_on_resume_list, error);

/*
 * ... but most is exclusively called from the thin target rather than the
 * thin-pool target.
 */

/* get_bio_block() */
        struct pool *pool = tc->pool;
        sector_t block_nr = bio->bi_iter.bi_sector;

        if (block_size_is_power_of_two(pool))
                block_nr >>= pool->sectors_per_block_shift;
        else
                (void) sector_div(block_nr, pool->sectors_per_block);

/* get_bio_block_range() */
        struct pool *pool = tc->pool;
        sector_t b = bio->bi_iter.bi_sector;
        sector_t e = b + (bio->bi_iter.bi_size >> SECTOR_SHIFT);

        b += pool->sectors_per_block - 1ull; /* so we round up */

        if (block_size_is_power_of_two(pool)) {
                b >>= pool->sectors_per_block_shift;
                e >>= pool->sectors_per_block_shift;
        } else {
                (void) sector_div(b, pool->sectors_per_block);
                (void) sector_div(e, pool->sectors_per_block);
        }
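/*
 * Example: with 128-sector blocks, a discard covering sectors 100..300
 * gives b = (100 + 127) >> 7 = 1 and e = 300 >> 7 = 2, i.e. only block 1
 * (sectors 128..255) is fully covered; the partial blocks at either edge
 * of the range are deliberately left alone.
 */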
/* remap() */
        struct pool *pool = tc->pool;
        sector_t bi_sector = bio->bi_iter.bi_sector;

        bio_set_dev(bio, tc->pool_dev->bdev);
        if (block_size_is_power_of_two(pool))
                bio->bi_iter.bi_sector =
                        (block << pool->sectors_per_block_shift) |
                        (bi_sector & (pool->sectors_per_block - 1));
        else
                bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
                                sector_div(bi_sector, pool->sectors_per_block);
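/*
 * remap() preserves the offset within the block: for power-of-two block
 * sizes the offset is the low bits of the virtual sector, OR'd onto the
 * start of the data block; otherwise sector_div() returns the remainder,
 * which is added to block * sectors_per_block.
 */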
/* remap_to_origin() */
        bio_set_dev(bio, tc->origin_dev->bdev);

/* bio_triggers_commit() */
        return op_is_flush(bio->bi_opf) &&
                dm_thin_changed_this_transaction(tc->td);

/* inc_all_io_entry() */
        h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);

/* issue() */
        struct pool *pool = tc->pool;

        if (dm_thin_aborted_changes(tc->td)) {

        spin_lock_irq(&pool->lock);
        bio_list_add(&pool->deferred_flush_bios, bio);
        spin_unlock_irq(&pool->lock);

/*----------------------------------------------------------------*/

/* struct dm_thin_new_mapping */
        struct dm_bio_prison_cell *cell;

        /*
         * ... The bio will still be in the cell, so care has to be taken
         * to avoid issuing the bio twice.
         */

/* __complete_mapping_preparation() */
        struct pool *pool = m->tc->pool;

        if (atomic_dec_and_test(&m->prepare_actions)) {
                list_add_tail(&m->list, &pool->prepared_mappings);

/* complete_mapping_preparation() */
        struct pool *pool = m->tc->pool;

        spin_lock_irqsave(&pool->lock, flags);
        spin_unlock_irqrestore(&pool->lock, flags);

/* copy_complete() */
        m->status = read_err || write_err ? BLK_STS_IOERR : 0;

/* overwrite_endio() */
        struct dm_thin_new_mapping *m = h->overwrite_mapping;

        bio->bi_end_io = m->saved_bi_end_io;
        m->status = bio->bi_status;

/*----------------------------------------------------------------*/

/*
 * This sends the bios in the cell, except the original holder, back
 * to the deferred_bios list.
 */
static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
        struct pool *pool = tc->pool;

        spin_lock_irqsave(&tc->lock, flags);
        cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
        has_work = !bio_list_empty(&tc->deferred_bio_list);
        spin_unlock_irqrestore(&tc->lock, flags);

/* __inc_remap_and_issue_cell() */
                                      struct dm_bio_prison_cell *cell)

        while ((bio = bio_list_pop(&cell->bios))) {
                if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD)
                        bio_list_add(&info->defer_bios, bio);
                else {
                        inc_all_io_entry(info->tc->pool, bio);
                        bio_list_add(&info->issue_bios, bio);

/* inc_remap_and_issue_cell() */
                                    struct dm_bio_prison_cell *cell,

        /*
         * We have to be careful to inc any bios we're about to issue
         * before the cell is released, and avoid a race with new bios
         * being added to the cell.
         */
        cell_visit_release(tc->pool, __inc_remap_and_issue_cell,
                           &info, cell);

/* process_prepared_mapping_fail() */
        cell_error(m->tc->pool, m->cell);
        list_del(&m->list);
        mempool_free(m, &m->tc->pool->mapping_pool);

/* complete_overwrite_bio() */
        struct pool *pool = tc->pool;

        if (dm_thin_aborted_changes(tc->td)) {

        spin_lock_irq(&pool->lock);
        bio_list_add(&pool->deferred_flush_completions, bio);
        spin_unlock_irq(&pool->lock);

/* process_prepared_mapping() */
        struct thin_c *tc = m->tc;
        struct pool *pool = tc->pool;
        struct bio *bio = m->bio;

        if (m->status) {
                cell_error(pool, m->cell);

        r = dm_thin_insert_block(tc->td, m->virt_begin, m->data_block);
        if (r) {
                cell_error(pool, m->cell);

        /*
         * ... the bios in the cell.
         */
        if (bio) {
                inc_remap_and_issue_cell(tc, m->cell, m->data_block);
        } else {
                inc_all_io_entry(tc->pool, m->cell->holder);
                remap_and_issue(tc, m->cell->holder, m->data_block);
                inc_remap_and_issue_cell(tc, m->cell, m->data_block);
        }

        list_del(&m->list);
        mempool_free(m, &pool->mapping_pool);

/*----------------------------------------------------------------*/

/* free_discard_mapping() */
        struct thin_c *tc = m->tc;

        if (m->cell)
                cell_defer_no_holder(tc, m->cell);
        mempool_free(m, &tc->pool->mapping_pool);

/* process_prepared_discard_fail() */
        bio_io_error(m->bio);

/* process_prepared_discard_success() */
        bio_endio(m->bio);

/* process_prepared_discard_no_passdown() */
        struct thin_c *tc = m->tc;

        r = dm_thin_remove_range(tc->td, m->cell->key.block_begin, m->cell->key.block_end);
        if (r) {
                metadata_operation_failed(tc->pool, "dm_thin_remove_range", r);
                bio_io_error(m->bio);
        } else
                bio_endio(m->bio);

        cell_defer_no_holder(tc, m->cell);
        mempool_free(m, &tc->pool->mapping_pool);

/*----------------------------------------------------------------*/

/* passdown_double_checking_shared_status() */
        struct thin_c *tc = m->tc;
        struct pool *pool = tc->pool;
        dm_block_t b = m->data_block, e, end = m->data_block + m->virt_end - m->virt_begin;

                r = dm_pool_block_is_shared(pool->pmd, b, &shared);

                        r = dm_pool_block_is_shared(pool->pmd, e, &shared);

/* queue_passdown_pt2() */
        struct pool *pool = m->tc->pool;

        spin_lock_irqsave(&pool->lock, flags);
        list_add_tail(&m->list, &pool->prepared_discards_pt2);
        spin_unlock_irqrestore(&pool->lock, flags);

/* passdown_endio() */
        queue_passdown_pt2(bio->bi_private);

/* process_prepared_discard_passdown_pt1() */
        struct thin_c *tc = m->tc;
        struct pool *pool = tc->pool;
        dm_block_t data_end = m->data_block + (m->virt_end - m->virt_begin);

        r = dm_thin_remove_range(tc->td, m->virt_begin, m->virt_end);
        if (r) {
                bio_io_error(m->bio);
                cell_defer_no_holder(tc, m->cell);
                mempool_free(m, &pool->mapping_pool);

        r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end);
        if (r) {
                bio_io_error(m->bio);
                cell_defer_no_holder(tc, m->cell);
                mempool_free(m, &pool->mapping_pool);

                       dm_device_name(tc->pool->pool_md));

        discard_parent->bi_end_io = passdown_endio;
        discard_parent->bi_private = m;

        if (m->maybe_shared)
                passdown_double_checking_shared_status(m, discard_parent);
        else {
                struct discard_op op;

                begin_discard(&op, tc, discard_parent);
                r = issue_discard(&op, m->data_block, data_end);
                end_discard(&op, r);
        }

/* process_prepared_discard_passdown_pt2() */
        struct thin_c *tc = m->tc;
        struct pool *pool = tc->pool;

        r = dm_pool_dec_data_range(pool->pmd, m->data_block,
                                   m->data_block + (m->virt_end - m->virt_begin));
        if (r) {
                bio_io_error(m->bio);
        } else
                bio_endio(m->bio);

        cell_defer_no_holder(tc, m->cell);
        mempool_free(m, &pool->mapping_pool);

/* process_prepared() */
        spin_lock_irq(&pool->lock);
        spin_unlock_irq(&pool->lock);

/* io_overlaps_block() */
        return bio->bi_iter.bi_size ==
                (pool->sectors_per_block << SECTOR_SHIFT);
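/*
 * A bio whose size exactly matches one pool block can be treated as an
 * "overwrite": instead of copying or zeroing the block first and only
 * then releasing the held bios, the bio itself is hooked (see
 * remap_and_issue_overwrite() below) and issued directly.
 */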
/* save_and_set_endio() */
        *save = bio->bi_end_io;
        bio->bi_end_io = fn;

/* ensure_next_mapping() */
        if (pool->next_mapping)
                return 0;

        pool->next_mapping = mempool_alloc(&pool->mapping_pool, GFP_ATOMIC);

        return pool->next_mapping ? 0 : -ENOMEM;

/* get_next_mapping() */
        struct dm_thin_new_mapping *m = pool->next_mapping;

        BUG_ON(!pool->next_mapping);

        INIT_LIST_HEAD(&m->list);
        m->bio = NULL;

        pool->next_mapping = NULL;

/* ll_zero() */
        to.bdev = tc->pool_dev->bdev;
        to.count = end - begin;

        dm_kcopyd_zero(tc->pool->copier, 1, &to, 0, copy_complete, m);

/* remap_and_issue_overwrite() */
        struct pool *pool = tc->pool;

        h->overwrite_mapping = m;
        m->bio = bio;
        save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);

/* schedule_copy() */
                          struct dm_bio_prison_cell *cell, struct bio *bio,

        struct pool *pool = tc->pool;

        m->tc = tc;
        m->virt_begin = virt_block;
        m->virt_end = virt_block + 1u;
        m->data_block = data_dest;
        m->cell = cell;

        atomic_set(&m->prepare_actions, 3);

        if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))

                from.bdev = origin->bdev;
                from.sector = data_origin * pool->sectors_per_block;
                from.count = len;

                to.bdev = tc->pool_dev->bdev;
                to.sector = data_dest * pool->sectors_per_block;
                to.count = len;

                dm_kcopyd_copy(pool->copier, &from, 1, &to,
                               0, copy_complete, m);

                if (len < pool->sectors_per_block && pool->pf.zero_new_blocks) {
                        atomic_inc(&m->prepare_actions);
                        ll_zero(tc, m,
                                data_dest * pool->sectors_per_block + len,
                                (data_dest + 1) * pool->sectors_per_block);
                }
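/*
 * prepare_actions starts at 3: one for quiescing reads of the block being
 * copied, one for the kcopyd copy itself, and one extra reference dropped
 * at the end of schedule_copy(); zeroing the tail of a partially copied
 * block (above) adds a fourth action.
 */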
/* schedule_internal_copy() */
                                  struct dm_bio_prison_cell *cell, struct bio *bio)
        schedule_copy(tc, virt_block, tc->pool_dev,
                      data_origin, data_dest, cell, bio,
                      tc->pool->sectors_per_block);

/* schedule_zero() */
                          dm_block_t data_block, struct dm_bio_prison_cell *cell,

        struct pool *pool = tc->pool;

        atomic_set(&m->prepare_actions, 1); /* no need to quiesce */
        m->tc = tc;
        m->virt_begin = virt_block;
        m->virt_end = virt_block + 1u;
        m->data_block = data_block;
        m->cell = cell;

        /*
         * If the whole block of data is being overwritten or we are not
         * zeroing pre-existing data, we can issue the bio immediately.
         */
        if (pool->pf.zero_new_blocks) {
                ll_zero(tc, m, data_block * pool->sectors_per_block,
                        (data_block + 1) * pool->sectors_per_block);

/* schedule_external_copy() */
                                  struct dm_bio_prison_cell *cell, struct bio *bio)
        struct pool *pool = tc->pool;
        sector_t virt_block_begin = virt_block * pool->sectors_per_block;
        sector_t virt_block_end = (virt_block + 1) * pool->sectors_per_block;

        if (virt_block_end <= tc->origin_size)
                schedule_copy(tc, virt_block, tc->origin_dev,
                              virt_block, data_dest, cell, bio,
                              pool->sectors_per_block);

        else if (virt_block_begin < tc->origin_size)
                schedule_copy(tc, virt_block, tc->origin_dev,
                              virt_block, data_dest, cell, bio,
                              tc->origin_size - virt_block_begin);

        else
                schedule_zero(tc, virt_block, data_dest, cell, bio);
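/*
 * With an external origin there are three cases: a block wholly below
 * origin_size is copied from the origin in full; a block straddling
 * origin_size is partially copied, with schedule_copy() zeroing the
 * remainder; a block entirely beyond the origin is simply zeroed.
 */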
/* check_for_metadata_space() */
        r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free);

/* check_for_data_space() */
        r = dm_pool_get_free_block_count(pool->pmd, &nr_free);

/*
 * commit(): a non-zero return indicates read_only or fail_io mode.
 */
                return -EINVAL;

        r = dm_pool_commit_metadata(pool->pmd);

/* check_low_water_mark() */
        if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
                       dm_device_name(pool->pool_md));
                spin_lock_irq(&pool->lock);
                pool->low_water_triggered = true;
                spin_unlock_irq(&pool->lock);
                dm_table_event(pool->ti->table);

/* alloc_data_block() */
        struct pool *pool = tc->pool;

                return -EINVAL;

        r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);

                r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);

                        return -ENOSPC;

        r = dm_pool_alloc_data_block(pool->pmd, result);
                if (r == -ENOSPC)

        r = dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks);

/* retry_on_resume() */
        struct thin_c *tc = h->tc;

        spin_lock_irq(&tc->lock);
        bio_list_add(&tc->retry_on_resume_list, bio);
        spin_unlock_irq(&tc->lock);

/* should_error_unserviceable_bio() */
        return pool->pf.error_if_no_space ? BLK_STS_NOSPC : 0;

/* handle_unserviceable_bio() */
                bio->bi_status = error;

static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
                cell_error_with_code(pool, cell, error);

        cell_release(pool, cell, &bios);

/* process_discard_cell_no_passdown() */
        struct pool *pool = tc->pool;

        m->tc = tc;
        m->virt_begin = virt_cell->key.block_begin;
        m->virt_end = virt_cell->key.block_end;
        m->cell = virt_cell;
        m->bio = virt_cell->holder;

        if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list))
                pool->process_prepared_discard(m);

/* break_up_discard_bio() */
        struct pool *pool = tc->pool;

                r = dm_thin_find_mapped_range(tc->td, begin, end, &virt_begin, &virt_end,

                build_key(tc->td, PHYSICAL, data_begin, data_begin + (virt_end - virt_begin), &data_key);
                if (bio_detain(tc->pool, &data_key, NULL, &data_cell)) {

                m->tc = tc;
                m->maybe_shared = maybe_shared;
                m->virt_begin = virt_begin;
                m->virt_end = virt_end;
                m->data_block = data_begin;
                m->cell = data_cell;
                m->bio = bio;

                /*
                 * This per-mapping bi_remaining increment is paired with
                 * the implicit decrement that occurs via bio_endio() in
                 * end_discard().
                 */
                if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list))
                        pool->process_prepared_discard(m);

/* process_discard_cell_passdown() */
        struct bio *bio = virt_cell->holder;

        h->cell = virt_cell;
        break_up_discard_bio(tc, virt_cell->key.block_begin, virt_cell->key.block_end, bio);

/* process_discard_bio() */
        build_key(tc->td, VIRTUAL, begin, end, &virt_key);
        if (bio_detain(tc->pool, &virt_key, bio, &virt_cell))
                /*
                 * ... cell will never be granted.
                 */
                return;

        tc->pool->process_discard_cell(tc, virt_cell);

/* break_sharing() */
                          struct dm_bio_prison_cell *cell)
        struct pool *pool = tc->pool;

                schedule_internal_copy(tc, block, lookup_result->block,
                                       data_block, cell, bio);

        case -ENOSPC:
                retry_bios_on_resume(pool, cell);

                cell_error(pool, cell);

/* __remap_and_issue_shared_cell() */
                                         struct dm_bio_prison_cell *cell)

        while ((bio = bio_list_pop(&cell->bios))) {
                if (bio_data_dir(bio) == WRITE || op_is_flush(bio->bi_opf) ||
                    bio_op(bio) == REQ_OP_DISCARD)
                        bio_list_add(&info->defer_bios, bio);
                else {
                        h->shared_read_entry = dm_deferred_entry_inc(info->tc->pool->shared_read_ds);
                        inc_all_io_entry(info->tc->pool, bio);
                        bio_list_add(&info->issue_bios, bio);
                }
        }

/* remap_and_issue_shared_cell() */
                                       struct dm_bio_prison_cell *cell,

        cell_visit_release(tc->pool, __remap_and_issue_shared_cell,
                           &info, cell);

/* process_shared_bio() */
        struct pool *pool = tc->pool;

        /*
         * If cell is already occupied, then sharing is already in the process
         * of being broken so we have nothing further to do here.
         */
        build_data_key(tc->td, lookup_result->block, &key);

        if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size) {
                break_sharing(tc, bio, block, &key, lookup_result, data_cell);
        } else {
                h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
                remap_and_issue(tc, bio, lookup_result->block);

                remap_and_issue_shared_cell(tc, data_cell, lookup_result->block);
                remap_and_issue_shared_cell(tc, virt_cell, lookup_result->block);
        }

/* provision_block() */
                            struct dm_bio_prison_cell *cell)
        struct pool *pool = tc->pool;

        if (!bio->bi_iter.bi_size) {
                cell_defer_no_holder(tc, cell);

                cell_defer_no_holder(tc, cell);

                if (tc->origin_dev)
                        schedule_external_copy(tc, block, data_block, cell, bio);
                else
                        schedule_zero(tc, block, data_block, cell, bio);

        case -ENOSPC:
                retry_bios_on_resume(pool, cell);

                cell_error(pool, cell);

/* process_cell() */
static void process_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
        struct pool *pool = tc->pool;
        struct bio *bio = cell->holder;

        if (tc->requeue_mode) {
                cell_requeue(pool, cell);

        r = dm_thin_find_block(tc->td, block, 1, &lookup_result);

                        process_shared_bio(tc, bio, block, &lookup_result, cell);

                        inc_remap_and_issue_cell(tc, cell, lookup_result.block);

        case -ENODATA:
                if (bio_data_dir(bio) == READ && tc->origin_dev) {
                        cell_defer_no_holder(tc, cell);

                        if (bio_end_sector(bio) <= tc->origin_size)
                                remap_to_origin_and_issue(tc, bio);
                        else if (bio->bi_iter.bi_sector < tc->origin_size) {
                                bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT;
                } else
                        provision_block(tc, bio, block, cell);

                cell_defer_no_holder(tc, cell);

/* process_bio() */
        struct pool *pool = tc->pool;
        struct dm_bio_prison_cell *cell;

        /*
         * If cell is already occupied, then the block is already
         * being provisioned so we have nothing further to do here.
         */
        build_virtual_key(tc->td, block, &key);
        if (bio_detain(pool, &key, bio, &cell))
                return;

        process_cell(tc, cell);

/* __process_bio_read_only() */
                                    struct dm_bio_prison_cell *cell)

        r = dm_thin_find_block(tc->td, block, 1, &lookup_result);

                if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size) {
                        handle_unserviceable_bio(tc->pool, bio);
                        if (cell)
                                cell_defer_no_holder(tc, cell);
                } else {
                        inc_all_io_entry(tc->pool, bio);
                        if (cell)
                                inc_remap_and_issue_cell(tc, cell, lookup_result.block);
                }

        case -ENODATA:
                if (cell)
                        cell_defer_no_holder(tc, cell);

                handle_unserviceable_bio(tc->pool, bio);

                if (tc->origin_dev) {
                        inc_all_io_entry(tc->pool, bio);

                if (cell)
                        cell_defer_no_holder(tc, cell);

/* process_cell_read_only() */
static void process_cell_read_only(struct thin_c *tc, struct dm_bio_prison_cell *cell)
        __process_bio_read_only(tc, cell->holder, cell);

static void process_cell_success(struct thin_c *tc, struct dm_bio_prison_cell *cell)
        cell_success(tc->pool, cell);

static void process_cell_fail(struct thin_c *tc, struct dm_bio_prison_cell *cell)
        cell_error(tc->pool, cell);

/* need_commit_due_to_time() */
        return !time_in_range(jiffies, pool->last_commit_jiffies,
                              pool->last_commit_jiffies + COMMIT_PERIOD);
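/*
 * When there are outstanding changes, metadata is committed at least once
 * per COMMIT_PERIOD, and sooner when a FLUSH/FUA bio arrives (see
 * bio_triggers_commit() above), bounding how much mapping state a crash
 * can lose.
 */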
/* __thin_bio_rb_add() */
        sector_t bi_sector = bio->bi_iter.bi_sector;

        rbp = &tc->sort_bio_list.rb_node;

                if (bi_sector < thin_bio(pbd)->bi_iter.bi_sector)
                        rbp = &(*rbp)->rb_left;
                else
                        rbp = &(*rbp)->rb_right;

        rb_link_node(&pbd->rb_node, parent, rbp);
        rb_insert_color(&pbd->rb_node, &tc->sort_bio_list);

/* __extract_sorted_bios() */
        for (node = rb_first(&tc->sort_bio_list); node; node = rb_next(node)) {

                bio_list_add(&tc->deferred_bio_list, bio);
                rb_erase(&pbd->rb_node, &tc->sort_bio_list);
        }

        WARN_ON(!RB_EMPTY_ROOT(&tc->sort_bio_list));
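/*
 * Deferred bios pass through a per-thin red-black tree keyed on
 * bi_sector: __thin_bio_rb_add() inserts them and __extract_sorted_bios()
 * walks the tree in order, so the bios are re-issued in ascending sector
 * order for better locality on the data device.
 */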
/* __sort_thin_deferred_bios() */
        bio_list_merge(&bios, &tc->deferred_bio_list);
        bio_list_init(&tc->deferred_bio_list);

        /* Sort deferred_bio_list using rb-tree */

/* process_thin_deferred_bios() */
        struct pool *pool = tc->pool;
        unsigned count = 0;

        if (tc->requeue_mode) {
                error_thin_bio_list(tc, &tc->deferred_bio_list,
                                    BLK_STS_DM_REQUEUE);

        spin_lock_irq(&tc->lock);

        if (bio_list_empty(&tc->deferred_bio_list)) {
                spin_unlock_irq(&tc->lock);

        bio_list_merge(&bios, &tc->deferred_bio_list);
        bio_list_init(&tc->deferred_bio_list);

        spin_unlock_irq(&tc->lock);

                        spin_lock_irq(&tc->lock);
                        bio_list_add(&tc->deferred_bio_list, bio);
                        bio_list_merge(&tc->deferred_bio_list, &bios);
                        spin_unlock_irq(&tc->lock);

                if (bio_op(bio) == REQ_OP_DISCARD)
                        pool->process_discard(tc, bio);
                else
                        pool->process_bio(tc, bio);

                if ((count++ & 127) == 0) {
                        throttle_work_update(&pool->throttle);
                        dm_pool_issue_prefetches(pool->pmd);
                }

/* cmp_cells() */
        BUG_ON(!lhs_cell->holder);
        BUG_ON(!rhs_cell->holder);

        if (lhs_cell->holder->bi_iter.bi_sector < rhs_cell->holder->bi_iter.bi_sector)
                return -1;

        if (lhs_cell->holder->bi_iter.bi_sector > rhs_cell->holder->bi_iter.bi_sector)
                return 1;

/* sort_cells() */
        unsigned count = 0;
        struct dm_bio_prison_cell *cell, *tmp;

        list_for_each_entry_safe(cell, tmp, cells, user_list) {
                if (count >= CELL_SORT_ARRAY_SIZE)
                        break;

                pool->cell_sort_array[count++] = cell;
                list_del(&cell->user_list);
        }

        sort(pool->cell_sort_array, count, sizeof(cell), cmp_cells, NULL);

        return count;
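/*
 * At most CELL_SORT_ARRAY_SIZE cells are pulled off the list into
 * cell_sort_array and sorted by their holder bio's starting sector using
 * cmp_cells(); whatever remains on the list is picked up by a later pass.
 */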
/* process_thin_deferred_cells() */
        struct pool *pool = tc->pool;
        struct dm_bio_prison_cell *cell;
        unsigned i, j, count;

        spin_lock_irq(&tc->lock);
        list_splice_init(&tc->deferred_cells, &cells);
        spin_unlock_irq(&tc->lock);

        count = sort_cells(tc->pool, &cells);

        for (i = 0; i < count; i++) {
                cell = pool->cell_sort_array[i];
                BUG_ON(!cell->holder);

                        for (j = i; j < count; j++)
                                list_add(&pool->cell_sort_array[j]->user_list, &cells);

                        spin_lock_irq(&tc->lock);
                        list_splice(&cells, &tc->deferred_cells);
                        spin_unlock_irq(&tc->lock);

                if (bio_op(cell->holder) == REQ_OP_DISCARD)
                        pool->process_discard_cell(tc, cell);
                else
                        pool->process_cell(tc, cell);
        }

/* get_first_thin() */
        if (!list_empty(&pool->active_thins)) {
                tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list);

/* get_next_thin() */
        list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) {

/* process_deferred_bios() */
        spin_lock_irq(&pool->lock);
        bio_list_merge(&bios, &pool->deferred_flush_bios);
        bio_list_init(&pool->deferred_flush_bios);

        bio_list_merge(&bio_completions, &pool->deferred_flush_completions);
        bio_list_init(&pool->deferred_flush_completions);
        spin_unlock_irq(&pool->lock);

        if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) &&
            !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
                return;

        pool->last_commit_jiffies = jiffies;

                if (bio->bi_opf & REQ_PREFLUSH)

/* do_worker() */
        throttle_work_start(&pool->throttle);
        dm_pool_issue_prefetches(pool->pmd);
        throttle_work_update(&pool->throttle);
        process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping);
        throttle_work_update(&pool->throttle);
        process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard);
        throttle_work_update(&pool->throttle);
        process_prepared(pool, &pool->prepared_discards_pt2, &pool->process_prepared_discard_pt2);
        throttle_work_update(&pool->throttle);
        throttle_work_complete(&pool->throttle);
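/*
 * One worker pass: issue metadata prefetches, then drain the three
 * prepared lists (mappings, discard stage 1, discard stage 2), refreshing
 * the throttle between stages so bio submitters get clamped if the pass
 * overruns THROTTLE_THRESHOLD.
 */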
/* do_waker() */
        queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);

/* do_no_space_timeout() */
        if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) {
                pool->pf.error_if_no_space = true;

/*----------------------------------------------------------------*/

/* pool_work_complete() */
        complete(&pw->complete);

/* pool_work_wait() */
        INIT_WORK_ONSTACK(&pw->worker, fn);
        init_completion(&pw->complete);
        queue_work(pool->wq, &pw->worker);
        wait_for_completion(&pw->complete);

/*----------------------------------------------------------------*/

/* do_noflush_start() */
        w->tc->requeue_mode = true;
        requeue_io(w->tc);
        pool_work_complete(&w->pw);

/* do_noflush_stop() */
        w->tc->requeue_mode = false;
        pool_work_complete(&w->pw);

/* noflush_work() */
        pool_work_wait(&w.pw, tc->pool, fn);

/*----------------------------------------------------------------*/

/* passdown_enabled() */
        return pt->adjusted_pf.discard_passdown;

/* set_discard_callbacks() */
        struct pool_c *pt = pool->ti->private;

        if (passdown_enabled(pt)) {
                pool->process_discard_cell = process_discard_cell_passdown;
                pool->process_prepared_discard = process_prepared_discard_passdown_pt1;
                pool->process_prepared_discard_pt2 = process_prepared_discard_passdown_pt2;
        } else {
                pool->process_discard_cell = process_discard_cell_no_passdown;
                pool->process_prepared_discard = process_prepared_discard_no_passdown;
        }

/* set_pool_mode() */
        struct pool_c *pt = pool->ti->private;
        bool needs_check = dm_pool_metadata_needs_check(pool->pmd);

                      dm_device_name(pool->pool_md));

        case PM_FAIL:
                dm_pool_metadata_read_only(pool->pmd);
                pool->process_bio = process_bio_fail;
                pool->process_discard = process_bio_fail;
                pool->process_cell = process_cell_fail;
                pool->process_discard_cell = process_cell_fail;
                pool->process_prepared_mapping = process_prepared_mapping_fail;
                pool->process_prepared_discard = process_prepared_discard_fail;

        case PM_OUT_OF_METADATA_SPACE:
        case PM_READ_ONLY:
                dm_pool_metadata_read_only(pool->pmd);
                pool->process_bio = process_bio_read_only;
                pool->process_discard = process_bio_success;
                pool->process_cell = process_cell_read_only;
                pool->process_discard_cell = process_cell_success;
                pool->process_prepared_mapping = process_prepared_mapping_fail;
                pool->process_prepared_discard = process_prepared_discard_success;

        case PM_OUT_OF_DATA_SPACE:
                pool->out_of_data_space = true;
                pool->process_bio = process_bio_read_only;
                pool->process_discard = process_discard_bio;
                pool->process_cell = process_cell_read_only;
                pool->process_prepared_mapping = process_prepared_mapping;

                if (!pool->pf.error_if_no_space && no_space_timeout)
                        queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);

        case PM_WRITE:
                cancel_delayed_work_sync(&pool->no_space_timeout);
                pool->out_of_data_space = false;
                pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space;
                dm_pool_metadata_read_write(pool->pmd);
                pool->process_bio = process_bio;
                pool->process_discard = process_discard_bio;
                pool->process_cell = process_cell;
                pool->process_prepared_mapping = process_prepared_mapping;

        pool->pf.mode = new_mode;

        pt->adjusted_pf.mode = new_mode;
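/*
 * Pool modes degrade from PM_WRITE through PM_OUT_OF_DATA_SPACE and the
 * read-only modes down to PM_FAIL (matching the strings in
 * notify_of_pool_mode_change() above).  Each transition swaps in a
 * stricter set of process_* callbacks instead of scattering mode checks
 * across the I/O path.
 */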
/* abort_transaction() */
        const char *dev_name = dm_device_name(pool->pool_md);

        if (dm_pool_abort_metadata(pool->pmd)) {

        if (dm_pool_metadata_set_needs_check(pool->pmd)) {

/* metadata_operation_failed() */
                    dm_device_name(pool->pool_md), op, r);

/*----------------------------------------------------------------*/

/* thin_defer_bio() */
        struct pool *pool = tc->pool;

        spin_lock_irq(&tc->lock);
        bio_list_add(&tc->deferred_bio_list, bio);
        spin_unlock_irq(&tc->lock);

/* thin_defer_bio_with_throttle() */
        struct pool *pool = tc->pool;

        throttle_lock(&pool->throttle);
        throttle_unlock(&pool->throttle);

/* thin_defer_cell() */
static void thin_defer_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
        struct pool *pool = tc->pool;

        throttle_lock(&pool->throttle);
        spin_lock_irq(&tc->lock);
        list_add_tail(&cell->user_list, &tc->deferred_cells);
        spin_unlock_irq(&tc->lock);
        throttle_unlock(&pool->throttle);

/* thin_hook_bio() */
        h->tc = tc;
        h->shared_read_entry = NULL;
        h->all_io_entry = NULL;
        h->overwrite_mapping = NULL;
        h->cell = NULL;

/*
 * Non-blocking function called from the thin target's map function.
 */
/* thin_bio_map() */
        struct thin_c *tc = ti->private;
        struct dm_thin_device *td = tc->td;

        if (tc->requeue_mode) {
                bio->bi_status = BLK_STS_DM_REQUEUE;

        if (get_pool_mode(tc->pool) == PM_FAIL) {

        if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD) {

        /*
         * We must hold the virtual cell before doing the lookup, otherwise
         * there's a race with discard.
         */
        build_virtual_key(tc->td, block, &key);
        if (bio_detain(tc->pool, &key, bio, &virt_cell))

                build_data_key(tc->td, result.block, &key);
                if (bio_detain(tc->pool, &key, bio, &data_cell)) {

                inc_all_io_entry(tc->pool, bio);

        case -ENODATA:
        case -EWOULDBLOCK:

                /*
                 * dm_thin_find_block can fail with -EINVAL if the
                 * pool is switched to fail-io mode.
                 */

/* requeue_bios() */
        list_for_each_entry_rcu(tc, &pool->active_thins, list) {
                spin_lock_irq(&tc->lock);
                bio_list_merge(&tc->deferred_bio_list, &tc->retry_on_resume_list);
                bio_list_init(&tc->retry_on_resume_list);
                spin_unlock_irq(&tc->lock);
        }

/*----------------------------------------------------------------
 * Binding of control targets to a pool object
 *--------------------------------------------------------------*/

/* data_dev_supports_discard() */
        struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);

/* disable_passdown_if_not_supported() */
        struct pool *pool = pt->pool;
        struct block_device *data_bdev = pt->data_dev->bdev;
        struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;

        if (!pt->adjusted_pf.discard_passdown)
                return;

        else if (data_limits->max_discard_sectors < pool->sectors_per_block)

                pt->adjusted_pf.discard_passdown = false;

/* bind_control_target() */
        struct pool_c *pt = ti->private;
        enum pool_mode new_mode = pt->adjusted_pf.mode;

                pt->adjusted_pf.mode = old_mode;

        pool->ti = ti;
        pool->pf = pt->adjusted_pf;
        pool->low_water_blocks = pt->low_water_blocks;

/* unbind_control_target() */
        if (pool->ti == ti)
                pool->ti = NULL;

/*----------------------------------------------------------------
 * Pool creation
 *--------------------------------------------------------------*/

/* pool_features_init() */
        pf->mode = PM_WRITE;
        pf->zero_new_blocks = true;
        pf->discard_enabled = true;
        pf->discard_passdown = true;
        pf->error_if_no_space = false;

/* __pool_destroy() */
        vfree(pool->cell_sort_array);
        if (dm_pool_metadata_close(pool->pmd) < 0)

        dm_bio_prison_destroy(pool->prison);
        dm_kcopyd_client_destroy(pool->copier);

        cancel_delayed_work_sync(&pool->waker);
        cancel_delayed_work_sync(&pool->no_space_timeout);
        if (pool->wq)
                destroy_workqueue(pool->wq);

        if (pool->next_mapping)
                mempool_free(pool->next_mapping, &pool->mapping_pool);
        mempool_exit(&pool->mapping_pool);
        bio_uninit(&pool->flush_bio);
        dm_deferred_set_destroy(pool->shared_read_ds);
        dm_deferred_set_destroy(pool->all_io_ds);

/* pool_create() */
        err_p = ERR_PTR(-ENOMEM);

        pool->pmd = pmd;
        pool->sectors_per_block = block_size;
        if (block_size & (block_size - 1))
                pool->sectors_per_block_shift = -1;
        else
                pool->sectors_per_block_shift = __ffs(block_size);
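/*
 * block_size & (block_size - 1) is non-zero exactly when block_size is
 * not a power of two; the -1 stored here is the sentinel that
 * block_size_is_power_of_two() tests, while __ffs() yields log2 of the
 * block size for the shift-based fast paths.
 */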
        pool->low_water_blocks = 0;
        pool_features_init(&pool->pf);
        pool->prison = dm_bio_prison_create();
        if (!pool->prison) {
                err_p = ERR_PTR(-ENOMEM);

        pool->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
        if (IS_ERR(pool->copier)) {
                r = PTR_ERR(pool->copier);

        pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
        if (!pool->wq) {
                err_p = ERR_PTR(-ENOMEM);

        throttle_init(&pool->throttle);
        INIT_WORK(&pool->worker, do_worker);
        INIT_DELAYED_WORK(&pool->waker, do_waker);
        INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
        spin_lock_init(&pool->lock);
        bio_list_init(&pool->deferred_flush_bios);
        bio_list_init(&pool->deferred_flush_completions);
        INIT_LIST_HEAD(&pool->prepared_mappings);
        INIT_LIST_HEAD(&pool->prepared_discards);
        INIT_LIST_HEAD(&pool->prepared_discards_pt2);
        INIT_LIST_HEAD(&pool->active_thins);
        pool->low_water_triggered = false;
        pool->suspended = true;
        pool->out_of_data_space = false;
        bio_init(&pool->flush_bio, NULL, 0);

        pool->shared_read_ds = dm_deferred_set_create();
        if (!pool->shared_read_ds) {
                err_p = ERR_PTR(-ENOMEM);

        pool->all_io_ds = dm_deferred_set_create();
        if (!pool->all_io_ds) {
                err_p = ERR_PTR(-ENOMEM);

        pool->next_mapping = NULL;
        r = mempool_init_slab_pool(&pool->mapping_pool, MAPPING_POOL_SIZE,
                                   _new_mapping_cache);

        pool->cell_sort_array =
                vmalloc(array_size(CELL_SORT_ARRAY_SIZE,
                                   sizeof(*pool->cell_sort_array)));
        if (!pool->cell_sort_array) {
                *error = "Error allocating cell sort array";
                err_p = ERR_PTR(-ENOMEM);

        pool->ref_count = 1;
        pool->last_commit_jiffies = jiffies;
        pool->pool_md = pool_md;
        pool->md_dev = metadata_dev;
        pool->data_dev = data_dev;

        /* error paths */
        mempool_exit(&pool->mapping_pool);
        dm_deferred_set_destroy(pool->all_io_ds);
        dm_deferred_set_destroy(pool->shared_read_ds);
        destroy_workqueue(pool->wq);
        dm_kcopyd_client_destroy(pool->copier);
        dm_bio_prison_destroy(pool->prison);

/* __pool_inc() */
        pool->ref_count++;

/* __pool_dec() */
        BUG_ON(!pool->ref_count);
        if (!--pool->ref_count)

/* __pool_find() */
                if (pool->pool_md != pool_md) {
                        return ERR_PTR(-EBUSY);

                if (pool->data_dev != data_dev) {
                        return ERR_PTR(-EBUSY);

                if (pool->md_dev != metadata_dev || pool->data_dev != data_dev) {
                        return ERR_PTR(-EINVAL);

/*----------------------------------------------------------------
 * Pool target methods
 *--------------------------------------------------------------*/

/* pool_dtr() */
        struct pool_c *pt = ti->private;

        unbind_control_target(pt->pool, ti);
        __pool_dec(pt->pool);
        dm_put_device(ti, pt->metadata_dev);
        dm_put_device(ti, pt->data_dev);
/* parse_pool_features() */
        if (!as->argc)
                return 0;

        r = dm_read_arg_group(_args, as, &argc, &ti->error);
        if (r)
                return -EINVAL;

                argc--;

                if (!strcasecmp(arg_name, "skip_block_zeroing"))
                        pf->zero_new_blocks = false;

                else if (!strcasecmp(arg_name, "ignore_discard"))
                        pf->discard_enabled = false;

                else if (!strcasecmp(arg_name, "no_discard_passdown"))
                        pf->discard_passdown = false;

                else if (!strcasecmp(arg_name, "read_only"))
                        pf->mode = PM_READ_ONLY;

                else if (!strcasecmp(arg_name, "error_if_no_space"))
                        pf->error_if_no_space = true;

                else {
                        ti->error = "Unrecognised pool feature requested";
                        r = -EINVAL;
/* metadata_low_callback() */
                dm_device_name(pool->pool_md));

        dm_table_event(pool->ti->table);

/*
 * ... This ensures that the data blocks of any newly inserted mappings are
 * properly written to non-volatile storage and won't be lost in case of a
 * crash.
 */
/* metadata_pre_commit_callback() */
        struct bio *flush_bio = &pool->flush_bio;

        bio_set_dev(flush_bio, pool->data_dev);
        flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

/* get_dev_size() */
        return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;

/* calc_metadata_threshold() */
        dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4;

/*
 * thin-pool <metadata dev> <data dev>
 * ...
 * skip_block_zeroing: skips the zeroing of newly-provisioned blocks.
 */
3301 r = -EINVAL; in pool_ctr()
3310 ti->error = "Error setting metadata or data device"; in pool_ctr()
3311 r = -EINVAL; in pool_ctr()
3328 ti->error = "Error opening metadata block device"; in pool_ctr()
3331 warn_if_metadata_device_too_big(metadata_dev->bdev); in pool_ctr()
3335 ti->error = "Error getting data device"; in pool_ctr()
3342 block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) { in pool_ctr()
3343 ti->error = "Invalid block size"; in pool_ctr()
3344 r = -EINVAL; in pool_ctr()
3349 ti->error = "Invalid low water mark"; in pool_ctr()
3350 r = -EINVAL; in pool_ctr()
3356 r = -ENOMEM; in pool_ctr()
3360 pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev, data_dev->bdev, in pool_ctr()
3361 block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created); in pool_ctr()
3373 if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) { in pool_ctr()
3374 ti->error = "Discard support cannot be disabled once enabled"; in pool_ctr()
3375 r = -EINVAL; in pool_ctr()
3379 pt->pool = pool; in pool_ctr()
3380 pt->ti = ti; in pool_ctr()
3381 pt->metadata_dev = metadata_dev; in pool_ctr()
3382 pt->data_dev = data_dev; in pool_ctr()
3383 pt->low_water_blocks = low_water_blocks; in pool_ctr()
3384 pt->adjusted_pf = pt->requested_pf = pf; in pool_ctr()
3385 ti->num_flush_bios = 1; in pool_ctr()
3386 ti->limit_swap_bios = true; in pool_ctr()
3394 ti->num_discard_bios = 1; in pool_ctr()
3401 ti->discards_supported = true; in pool_ctr()
3403 ti->private = pt; in pool_ctr()
3405 r = dm_pool_register_metadata_threshold(pt->pool->pmd, in pool_ctr()
3410 ti->error = "Error registering metadata threshold"; in pool_ctr()
3414 dm_pool_register_pre_commit_callback(pool->pmd, in pool_ctr()
3438 struct pool_c *pt = ti->private; in pool_map()
3439 struct pool *pool = pt->pool; in pool_map()
3442 * As this is a singleton target, ti->begin is always zero. in pool_map()
3444 spin_lock_irq(&pool->lock); in pool_map()
3445 bio_set_dev(bio, pt->data_dev->bdev); in pool_map()
3447 spin_unlock_irq(&pool->lock); in pool_map()
3455 struct pool_c *pt = ti->private; in maybe_resize_data_dev()
3456 struct pool *pool = pt->pool; in maybe_resize_data_dev()
3457 sector_t data_size = ti->len; in maybe_resize_data_dev()
3462 (void) sector_div(data_size, pool->sectors_per_block); in maybe_resize_data_dev()
3464 r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size); in maybe_resize_data_dev()
3467 dm_device_name(pool->pool_md)); in maybe_resize_data_dev()
3473 dm_device_name(pool->pool_md), in maybe_resize_data_dev()
3475 return -EINVAL; in maybe_resize_data_dev()
3478 if (dm_pool_metadata_needs_check(pool->pmd)) { in maybe_resize_data_dev()
3480 dm_device_name(pool->pool_md)); in maybe_resize_data_dev()
3486 dm_device_name(pool->pool_md), in maybe_resize_data_dev()
3488 r = dm_pool_resize_data_dev(pool->pmd, data_size); in maybe_resize_data_dev()
3503 struct pool_c *pt = ti->private; in maybe_resize_metadata_dev()
3504 struct pool *pool = pt->pool; in maybe_resize_metadata_dev()
3509 metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev); in maybe_resize_metadata_dev()
3511 r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size); in maybe_resize_metadata_dev()
3514 dm_device_name(pool->pool_md)); in maybe_resize_metadata_dev()
3520 dm_device_name(pool->pool_md), in maybe_resize_metadata_dev()
3522 return -EINVAL; in maybe_resize_metadata_dev()
3525 if (dm_pool_metadata_needs_check(pool->pmd)) { in maybe_resize_metadata_dev()
3527 dm_device_name(pool->pool_md)); in maybe_resize_metadata_dev()
3531 warn_if_metadata_device_too_big(pool->md_dev); in maybe_resize_metadata_dev()
3533 dm_device_name(pool->pool_md), in maybe_resize_metadata_dev()
3539 r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size); in maybe_resize_metadata_dev()
3558 * -and-
3566 struct pool_c *pt = ti->private; in pool_preresume()
3567 struct pool *pool = pt->pool; in pool_preresume()
3588 * When a thin-pool is PM_FAIL, it cannot be rebuilt if in pool_preresume()
/* pool_suspend_active_thins(): walks the pool's active thin devices */
	dm_internal_suspend_noflush(tc->thin_md);

/* pool_resume_active_thins() */
	dm_internal_resume(tc->thin_md);
/* pool_resume() */
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;

	/*
	 * Must requeue the active thins' bios and then resume them
	 * _before_ clearing the 'suspended' flag.
	 */
	requeue_bios(pool);
	pool_resume_active_thins(pool);

	spin_lock_irq(&pool->lock);
	pool->low_water_triggered = false;
	pool->suspended = false;
	spin_unlock_irq(&pool->lock);

	do_waker(&pool->waker.work);
/* pool_presuspend() */
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;

	spin_lock_irq(&pool->lock);
	pool->suspended = true;
	spin_unlock_irq(&pool->lock);

	pool_suspend_active_thins(pool);
/* pool_presuspend_undo() */
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;

	pool_resume_active_thins(pool);

	spin_lock_irq(&pool->lock);
	pool->suspended = false;
	spin_unlock_irq(&pool->lock);
/* pool_postsuspend() */
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;

	cancel_delayed_work_sync(&pool->waker);
	cancel_delayed_work_sync(&pool->no_space_timeout);
	flush_workqueue(pool->wq);
	(void) commit(pool);
/* check_arg_count() */
	return -EINVAL;

/* read_dev_id() */
	return -EINVAL;

/* process_create_thin_mesg() */
	r = dm_pool_create_thin(pool->pmd, dev_id);
	if (r) {
		DMWARN("Creation of new thinly-provisioned device with id %s failed.",
		       argv[1]);
		return r;
	}

/* process_create_snap_mesg() */
	r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id);

/* process_delete_mesg() */
	r = dm_pool_delete_thin_device(pool->pmd, dev_id);

/* process_set_transaction_id_mesg(): both ids must parse as u64 */
	if (kstrtoull(argv[1], 10, (unsigned long long *)&old_id))
		return -EINVAL;
	if (kstrtoull(argv[2], 10, (unsigned long long *)&new_id))
		return -EINVAL;
	r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id);

/* process_reserve_metadata_snap_mesg() */
	r = dm_pool_reserve_metadata_snap(pool->pmd);

/* process_release_metadata_snap_mesg() */
	r = dm_pool_release_metadata_snap(pool->pmd);
/* pool_message() */
	int r = -EINVAL;
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;

	if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE) {
		DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
		      dm_device_name(pool->pool_md));
		return -EOPNOTSUPP;
	}
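/*
 * The handlers above implement the documented pool messages:
 * create_thin <dev_id>, create_snap <dev_id> <origin_id>, delete <dev_id>,
 * set_transaction_id <old> <new>, reserve_metadata_snap and
 * release_metadata_snap.  For example (device name invented):
 *
 *   dmsetup message /dev/mapper/pool 0 "create_thin 0"
 *   dmsetup message /dev/mapper/pool 0 "create_snap 1 0"
 */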
/* emit_flags() */
	unsigned count = !pf->zero_new_blocks + !pf->discard_enabled +
		!pf->discard_passdown + (pf->mode == PM_READ_ONLY) +
		pf->error_if_no_space;
	DMEMIT("%u ", count);

	if (!pf->zero_new_blocks)
		DMEMIT("skip_block_zeroing ");

	if (!pf->discard_enabled)
		DMEMIT("ignore_discard ");

	if (!pf->discard_passdown)
		DMEMIT("no_discard_passdown ");

	if (pf->mode == PM_READ_ONLY)
		DMEMIT("read_only ");

	if (pf->error_if_no_space)
		DMEMIT("error_if_no_space ");
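/*
 * emit_flags() counts the feature words before printing them by summing
 * boolean expressions: in C each negation or comparison evaluates to 0 or
 * 1, so the sum is exactly the number of flags that follow.  A standalone
 * sketch of the same trick (struct and names are illustrative):
 */
#include <stdio.h>

struct example_features {
	int zero_new_blocks;
	int discard_enabled;
	int read_only;
};

static unsigned example_count_flags(const struct example_features *f)
{
	/* each term is 0 or 1, so the sum matches the number of words emitted */
	return !f->zero_new_blocks + !f->discard_enabled + f->read_only;
}

int main(void)
{
	struct example_features f = { .zero_new_blocks = 0, .discard_enabled = 1 };

	printf("%u\n", example_count_flags(&f));	/* prints 1: skip_block_zeroing */
	return 0;
}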
/* pool_status() */
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;

	/* Commit to ensure statistics aren't out-of-date */
	if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
		(void) commit(pool);

	r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
	if (r) {
		DMERR("%s: dm_pool_get_metadata_transaction_id returned %d",
		      dm_device_name(pool->pool_md), r);
		goto err;
	}

	/* the remaining getters follow the same DMERR-and-goto-err pattern */
	r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
	r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
	r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
	r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
	r = dm_pool_get_metadata_snap(pool->pmd, &held_root);

	DMEMIT("%llu %llu/%llu %llu/%llu ",
	       (unsigned long long)transaction_id,
	       (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
	       (unsigned long long)nr_blocks_metadata,
	       (unsigned long long)(nr_blocks_data - nr_free_blocks_data),
	       (unsigned long long)nr_blocks_data);

	if (held_root)
		DMEMIT("%llu ", held_root);
	else
		DMEMIT("- ");

	/* pool mode (rw/ro/out_of_data_space) is emitted here */

	if (!pool->pf.discard_enabled)
		DMEMIT("ignore_discard ");
	else if (pool->pf.discard_passdown)
		DMEMIT("discard_passdown ");
	else
		DMEMIT("no_discard_passdown ");

	if (pool->pf.error_if_no_space)
		DMEMIT("error_if_no_space ");
	else
		DMEMIT("queue_if_no_space ");

	if (dm_pool_metadata_needs_check(pool->pmd))
		DMEMIT("needs_check ");
	else
		DMEMIT("- ");

	/* STATUSTYPE_TABLE */
	DMEMIT("%s %s %lu %llu ",
	       format_dev_t(buf, pt->metadata_dev->bdev->bd_dev),
	       format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
	       (unsigned long)pool->sectors_per_block,
	       (unsigned long long)pt->low_water_blocks);
	emit_flags(&pt->requested_pf, result, sz, maxlen);
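/*
 * Taken together, the DMEMITs above produce the documented pool status
 * line: "<transaction id> <used metadata blocks>/<total metadata blocks>
 * <used data blocks>/<total data blocks> <held metadata root> ...", where
 * "used" is computed as total minus free.  An illustrative line for a
 * healthy read-write pool with no held metadata snapshot (values
 * invented):
 *
 *   0 141/4161600 8350/1048576 - rw discard_passdown queue_if_no_space -
 */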
/* pool_iterate_devices() */
	struct pool_c *pt = ti->private;

	return fn(ti, pt->data_dev, 0, ti->len, data);
/* pool_io_hints() */
	struct pool_c *pt = ti->private;
	struct pool *pool = pt->pool;
	sector_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;

	/*
	 * If max_sectors is smaller than pool->sectors_per_block adjust it
	 * to the highest possible power-of-2 factor of pool->sectors_per_block.
	 * This is especially helpful when the pool's data device is a RAID
	 * device that has a full stripe width that matches pool->sectors_per_block
	 * -- because even though partial RAID stripe-sized IOs will be issued to a
	 * single RAID stripe, when aggregated they will end on a full RAID stripe
	 * boundary.
	 */
	if (limits->max_sectors < pool->sectors_per_block) {
		while (!is_factor(pool->sectors_per_block, limits->max_sectors)) {
			if ((limits->max_sectors & (limits->max_sectors - 1)) == 0)
				limits->max_sectors--;
			limits->max_sectors = rounddown_pow_of_two(limits->max_sectors);
		}
	}

	/*
	 * If the system-determined stacked limits are compatible with the
	 * pool's blocksize (io_opt is a factor) do not override them.
	 */
	if (io_opt_sectors < pool->sectors_per_block ||
	    !is_factor(io_opt_sectors, pool->sectors_per_block)) {
		if (is_factor(pool->sectors_per_block, limits->max_sectors))
			blk_limits_io_min(limits, limits->max_sectors << SECTOR_SHIFT);
		else
			blk_limits_io_min(limits, pool->sectors_per_block << SECTOR_SHIFT);
		blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
	}

	/*
	 * pt->adjusted_pf is a staging area for the actual features to use.
	 * They get transferred to the live pool in bind_control_target()
	 * called from pool_preresume().
	 */
	if (!pt->adjusted_pf.discard_enabled) {
		/*
		 * Must explicitly disallow stacking discard limits otherwise the
		 * block layer will stack them if the pool's data device has support.
		 */
		limits->discard_granularity = 0;
		return;
	}
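/*
 * The loop above shrinks max_sectors to the largest power-of-2 factor of
 * the pool's block size.  A standalone model of that computation, assuming
 * both arguments are non-zero (the helper name is illustrative):
 */
static unsigned long example_pow2_factor(unsigned long block_sectors,
					 unsigned long max_sectors)
{
	/* round down to a power of two, mirroring rounddown_pow_of_two() */
	while (max_sectors & (max_sectors - 1))
		max_sectors &= max_sectors - 1;	/* clear the lowest set bit */

	/*
	 * Halve until it divides block_sectors.  This always terminates:
	 * every value divides by 1, so max_sectors bottoms out there.
	 */
	while (block_sectors % max_sectors)
		max_sectors >>= 1;

	return max_sectors;
}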
/* pool target registration */
	.name = "thin-pool",

/*----------------------------------------------------------------
 * Thin target methods
 *--------------------------------------------------------------*/
/* thin_get() */
	refcount_inc(&tc->refcount);

/* thin_put() */
	if (refcount_dec_and_test(&tc->refcount))
		complete(&tc->can_destroy);
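/*
 * thin_get()/thin_put() pair a refcount with a completion so the
 * destructor can wait for the last user to drop its reference.  A minimal
 * kernel-style sketch of the pattern (struct and names are illustrative;
 * refcount_t and struct completion come from <linux/refcount.h> and
 * <linux/completion.h>):
 */
struct example_obj {
	refcount_t refcount;
	struct completion can_destroy;
};

static void example_obj_put(struct example_obj *obj)
{
	/* the final put signals whoever is waiting in the destructor */
	if (refcount_dec_and_test(&obj->refcount))
		complete(&obj->can_destroy);
}

static void example_obj_destroy(struct example_obj *obj)
{
	example_obj_put(obj);			/* drop the constructor's reference */
	wait_for_completion(&obj->can_destroy);	/* wait out all other users */
	/* no example_obj_put() caller can still hold obj; safe to free */
}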
/* thin_dtr() */
	struct thin_c *tc = ti->private;

	spin_lock_irq(&tc->pool->lock);
	list_del_rcu(&tc->list);
	spin_unlock_irq(&tc->pool->lock);
	synchronize_rcu();

	thin_put(tc);
	wait_for_completion(&tc->can_destroy);

	mutex_lock(&dm_thin_pool_table.mutex);

	__pool_dec(tc->pool);
	dm_pool_close_thin_device(tc->td);
	dm_put_device(ti, tc->pool_dev);
	if (tc->origin_dev)
		dm_put_device(ti, tc->origin_dev);
	kfree(tc);

	mutex_unlock(&dm_thin_pool_table.mutex);
/* thin_ctr() */
	if (argc != 2 && argc != 3) {
		ti->error = "Invalid argument count";
		r = -EINVAL;
		goto out_unlock;
	}

	tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL);
	if (!tc) {
		ti->error = "Out of memory";
		r = -ENOMEM;
		goto out_unlock;
	}
	tc->thin_md = dm_table_get_md(ti->table);
	spin_lock_init(&tc->lock);
	INIT_LIST_HEAD(&tc->deferred_cells);
	bio_list_init(&tc->deferred_bio_list);
	bio_list_init(&tc->retry_on_resume_list);
	tc->sort_bio_list = RB_ROOT;

	if (argc == 3) {
		if (!strcmp(argv[0], argv[2])) {
			ti->error = "Error setting origin device";
			r = -EINVAL;
			goto bad_origin_dev;
		}

		r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
		if (r) {
			ti->error = "Error opening origin device";
			goto bad_origin_dev;
		}
		tc->origin_dev = origin_dev;
	}

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
	if (r) {
		ti->error = "Error opening pool device";
		goto bad_pool_dev;
	}
	tc->pool_dev = pool_dev;

	if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) {
		ti->error = "Invalid device id";
		r = -EINVAL;
		goto bad_common;
	}

	pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev);
	if (!pool_md) {
		ti->error = "Couldn't get pool mapped device";
		r = -EINVAL;
		goto bad_common;
	}

	tc->pool = __pool_table_lookup(pool_md);
	if (!tc->pool) {
		ti->error = "Couldn't find pool object";
		r = -EINVAL;
		goto bad_pool_lookup;
	}
	__pool_inc(tc->pool);

	if (get_pool_mode(tc->pool) == PM_FAIL) {
		ti->error = "Couldn't open thin device, Pool is in fail mode";
		r = -EINVAL;
		goto bad_pool;
	}

	r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
	if (r) {
		ti->error = "Couldn't open thin internal device";
		goto bad_pool;
	}

	r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
	if (r)
		goto bad;

	ti->num_flush_bios = 1;
	ti->limit_swap_bios = true;
	ti->flush_supported = true;
	ti->per_io_data_size = sizeof(struct dm_thin_endio_hook);

	/* In case the pool supports discards, pass them on. */
	if (tc->pool->pf.discard_enabled) {
		ti->discards_supported = true;
		ti->num_discard_bios = 1;
	}

	spin_lock_irq(&tc->pool->lock);
	if (tc->pool->suspended) {
		spin_unlock_irq(&tc->pool->lock);
		ti->error = "Unable to activate thin device while pool is suspended";
		r = -EINVAL;
		goto bad;
	}
	refcount_set(&tc->refcount, 1);
	init_completion(&tc->can_destroy);
	list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
	spin_unlock_irq(&tc->pool->lock);

	return 0;

bad:
	dm_pool_close_thin_device(tc->td);
bad_pool:
	__pool_dec(tc->pool);
bad_pool_lookup:
	dm_put_md(pool_md);
bad_common:
	dm_put_device(ti, tc->pool_dev);
bad_pool_dev:
	if (tc->origin_dev)
		dm_put_device(ti, tc->origin_dev);
bad_origin_dev:
	kfree(tc);
out_unlock:
	mutex_unlock(&dm_thin_pool_table.mutex);

	return r;
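/*
 * Example usage (per the thin-provisioning documentation): a thin device
 * is first created through a pool message and then activated with a "thin"
 * table referencing the pool and the device id.  A sketch with an invented
 * 1GiB (2097152-sector) size:
 *
 *   dmsetup message /dev/mapper/pool 0 "create_thin 0"
 *   dmsetup create thin --table "0 2097152 thin /dev/mapper/pool 0"
 */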
/* thin_map() */
	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);

	return thin_bio_map(ti, bio);
/* thin_endio() */
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct pool *pool = h->tc->pool;

	if (h->shared_read_entry) {
		INIT_LIST_HEAD(&work);
		dm_deferred_entry_dec(h->shared_read_entry, &work);

		spin_lock_irqsave(&pool->lock, flags);
		list_for_each_entry_safe(m, tmp, &work, list) {
			list_del(&m->list);
			__complete_mapping_preparation(m);
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}

	if (h->all_io_entry) {
		INIT_LIST_HEAD(&work);
		dm_deferred_entry_dec(h->all_io_entry, &work);
		if (!list_empty(&work)) {
			spin_lock_irqsave(&pool->lock, flags);
			list_for_each_entry_safe(m, tmp, &work, list)
				list_add_tail(&m->list, &pool->prepared_discards);
			spin_unlock_irqrestore(&pool->lock, flags);
			wake_worker(pool);
		}
	}

	if (h->cell)
		cell_defer_no_holder(h->tc, h->cell);
/* thin_presuspend() */
	struct thin_c *tc = ti->private;

	if (dm_noflush_suspending(ti))
		noflush_work(tc, do_noflush_start);

/* thin_postsuspend() */
	struct thin_c *tc = ti->private;

	noflush_work(tc, do_noflush_stop);

/* thin_preresume() */
	struct thin_c *tc = ti->private;

	if (tc->origin_dev)
		tc->origin_size = get_dev_size(tc->origin_dev->bdev);
/* thin_status() */
	struct thin_c *tc = ti->private;

	if (get_pool_mode(tc->pool) == PM_FAIL) {
		DMEMIT("Fail");
		return;
	}

	if (!tc->td)
		DMEMIT("-");
	else {
		/* STATUSTYPE_INFO: "<mapped sectors> <highest mapped sector>" */
		r = dm_thin_get_mapped_count(tc->td, &mapped);
		if (r) {
			DMERR("dm_thin_get_mapped_count returned %d", r);
			goto err;
		}
		r = dm_thin_get_highest_mapped_block(tc->td, &highest);
		if (r < 0) {
			DMERR("dm_thin_get_highest_mapped_block returned %d", r);
			goto err;
		}

		DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
		if (r)
			DMEMIT("%llu", ((highest + 1) *
					tc->pool->sectors_per_block) - 1);
		else
			DMEMIT("-");

		/* STATUSTYPE_TABLE: "<pool dev> <dev id> [<origin dev>]" */
		DMEMIT("%s %lu",
		       format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
		       (unsigned long) tc->dev_id);
		if (tc->origin_dev)
			DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
	}
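/*
 * Worked example of the INFO line above: the highest mapped sector is the
 * last sector of the highest mapped block, so with 128-sector blocks and
 * highest == 9 it is (9 + 1) * 128 - 1 = 1279, while a device with 10
 * mapped blocks reports 10 * 128 = 1280 mapped sectors.
 */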
/* thin_iterate_devices() */
	struct thin_c *tc = ti->private;
	struct pool *pool = tc->pool;

	/*
	 * We can't call dm_pool_get_data_dev_size() since that blocks, so
	 * we follow a more convoluted path through the pool's target.
	 */
	if (!pool->ti)
		return 0;	/* nothing is bound */

	blocks = pool->ti->len;
	(void) sector_div(blocks, pool->sectors_per_block);
	if (blocks)
		return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data);
/* thin_io_hints() */
	struct thin_c *tc = ti->private;
	struct pool *pool = tc->pool;

	if (!pool->pf.discard_enabled)
		return;

	limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
	limits->max_discard_sectors = 2048 * 1024 * 16; /* 2048*1024*16 sectors * 512B = 16GiB */
/*----------------------------------------------------------------*/
/* dm_thin_init() */
	int r = -ENOMEM;

MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");