Lines matching refs: tc (references to the struct thin_c pointer in drivers/md/dm-thin.c; a short sketch of the process_bio_fn dispatch pattern follows the listing)
223 typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
224 typedef void (*process_cell_fn)(struct thin_c *tc, struct dm_bio_prison_cell *cell);
380 struct thin_c *tc; member
386 static void begin_discard(struct discard_op *op, struct thin_c *tc, struct bio *parent) in begin_discard() argument
390 op->tc = tc; in begin_discard()
398 struct thin_c *tc = op->tc; in issue_discard() local
399 sector_t s = block_to_sectors(tc->pool, data_b); in issue_discard()
400 sector_t len = block_to_sectors(tc->pool, data_e - data_b); in issue_discard()
402 return __blkdev_issue_discard(tc->pool_dev->bdev, s, len, in issue_discard()
585 struct thin_c *tc; member
609 static void error_thin_bio_list(struct thin_c *tc, struct bio_list *master, in error_thin_bio_list() argument
617 spin_lock_irqsave(&tc->lock, flags); in error_thin_bio_list()
619 spin_unlock_irqrestore(&tc->lock, flags); in error_thin_bio_list()
624 static void requeue_deferred_cells(struct thin_c *tc) in requeue_deferred_cells() argument
626 struct pool *pool = tc->pool; in requeue_deferred_cells()
633 spin_lock_irqsave(&tc->lock, flags); in requeue_deferred_cells()
634 list_splice_init(&tc->deferred_cells, &cells); in requeue_deferred_cells()
635 spin_unlock_irqrestore(&tc->lock, flags); in requeue_deferred_cells()
641 static void requeue_io(struct thin_c *tc) in requeue_io() argument
648 spin_lock_irqsave(&tc->lock, flags); in requeue_io()
649 __merge_bio_list(&bios, &tc->deferred_bio_list); in requeue_io()
650 __merge_bio_list(&bios, &tc->retry_on_resume_list); in requeue_io()
651 spin_unlock_irqrestore(&tc->lock, flags); in requeue_io()
654 requeue_deferred_cells(tc); in requeue_io()
659 struct thin_c *tc; in error_retry_list_with_code() local
662 list_for_each_entry_rcu(tc, &pool->active_thins, list) in error_retry_list_with_code()
663 error_thin_bio_list(tc, &tc->retry_on_resume_list, error); in error_retry_list_with_code()
679 static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio) in get_bio_block() argument
681 struct pool *pool = tc->pool; in get_bio_block()
695 static void get_bio_block_range(struct thin_c *tc, struct bio *bio, in get_bio_block_range() argument
698 struct pool *pool = tc->pool; in get_bio_block_range()
720 static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block) in remap() argument
722 struct pool *pool = tc->pool; in remap()
725 bio_set_dev(bio, tc->pool_dev->bdev); in remap()
735 static void remap_to_origin(struct thin_c *tc, struct bio *bio) in remap_to_origin() argument
737 bio_set_dev(bio, tc->origin_dev->bdev); in remap_to_origin()
740 static int bio_triggers_commit(struct thin_c *tc, struct bio *bio) in bio_triggers_commit() argument
743 dm_thin_changed_this_transaction(tc->td); in bio_triggers_commit()
757 static void issue(struct thin_c *tc, struct bio *bio) in issue() argument
759 struct pool *pool = tc->pool; in issue()
762 if (!bio_triggers_commit(tc, bio)) { in issue()
772 if (dm_thin_aborted_changes(tc->td)) { in issue()
786 static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio) in remap_to_origin_and_issue() argument
788 remap_to_origin(tc, bio); in remap_to_origin_and_issue()
789 issue(tc, bio); in remap_to_origin_and_issue()
792 static void remap_and_issue(struct thin_c *tc, struct bio *bio, in remap_and_issue() argument
795 remap(tc, bio, block); in remap_and_issue()
796 issue(tc, bio); in remap_and_issue()
818 struct thin_c *tc; member
835 struct pool *pool = m->tc->pool; in __complete_mapping_preparation()
846 struct pool *pool = m->tc->pool; in complete_mapping_preparation()
886 static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell) in cell_defer_no_holder() argument
888 struct pool *pool = tc->pool; in cell_defer_no_holder()
891 spin_lock_irqsave(&tc->lock, flags); in cell_defer_no_holder()
892 cell_release_no_holder(pool, cell, &tc->deferred_bio_list); in cell_defer_no_holder()
893 spin_unlock_irqrestore(&tc->lock, flags); in cell_defer_no_holder()
898 static void thin_defer_bio(struct thin_c *tc, struct bio *bio);
901 struct thin_c *tc; member
916 inc_all_io_entry(info->tc->pool, bio); in __inc_remap_and_issue_cell()
928 static void inc_remap_and_issue_cell(struct thin_c *tc, in inc_remap_and_issue_cell() argument
935 info.tc = tc; in inc_remap_and_issue_cell()
944 cell_visit_release(tc->pool, __inc_remap_and_issue_cell, in inc_remap_and_issue_cell()
948 thin_defer_bio(tc, bio); in inc_remap_and_issue_cell()
951 remap_and_issue(info.tc, bio, block); in inc_remap_and_issue_cell()
956 cell_error(m->tc->pool, m->cell); in process_prepared_mapping_fail()
958 mempool_free(m, &m->tc->pool->mapping_pool); in process_prepared_mapping_fail()
961 static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio) in complete_overwrite_bio() argument
963 struct pool *pool = tc->pool; in complete_overwrite_bio()
970 if (!bio_triggers_commit(tc, bio)) { in complete_overwrite_bio()
980 if (dm_thin_aborted_changes(tc->td)) { in complete_overwrite_bio()
996 struct thin_c *tc = m->tc; in process_prepared_mapping() local
997 struct pool *pool = tc->pool; in process_prepared_mapping()
1011 r = dm_thin_insert_block(tc->td, m->virt_begin, m->data_block); in process_prepared_mapping()
1025 inc_remap_and_issue_cell(tc, m->cell, m->data_block); in process_prepared_mapping()
1026 complete_overwrite_bio(tc, bio); in process_prepared_mapping()
1028 inc_all_io_entry(tc->pool, m->cell->holder); in process_prepared_mapping()
1029 remap_and_issue(tc, m->cell->holder, m->data_block); in process_prepared_mapping()
1030 inc_remap_and_issue_cell(tc, m->cell, m->data_block); in process_prepared_mapping()
1042 struct thin_c *tc = m->tc; in free_discard_mapping() local
1044 cell_defer_no_holder(tc, m->cell); in free_discard_mapping()
1045 mempool_free(m, &tc->pool->mapping_pool); in free_discard_mapping()
1063 struct thin_c *tc = m->tc; in process_prepared_discard_no_passdown() local
1065 r = dm_thin_remove_range(tc->td, m->cell->key.block_begin, m->cell->key.block_end); in process_prepared_discard_no_passdown()
1067 metadata_operation_failed(tc->pool, "dm_thin_remove_range", r); in process_prepared_discard_no_passdown()
1072 cell_defer_no_holder(tc, m->cell); in process_prepared_discard_no_passdown()
1073 mempool_free(m, &tc->pool->mapping_pool); in process_prepared_discard_no_passdown()
1087 struct thin_c *tc = m->tc; in passdown_double_checking_shared_status() local
1088 struct pool *pool = tc->pool; in passdown_double_checking_shared_status()
1092 begin_discard(&op, tc, discard_parent); in passdown_double_checking_shared_status()
1130 struct pool *pool = m->tc->pool; in queue_passdown_pt2()
1151 struct thin_c *tc = m->tc; in process_prepared_discard_passdown_pt1() local
1152 struct pool *pool = tc->pool; in process_prepared_discard_passdown_pt1()
1161 r = dm_thin_remove_range(tc->td, m->virt_begin, m->virt_end); in process_prepared_discard_passdown_pt1()
1165 cell_defer_no_holder(tc, m->cell); in process_prepared_discard_passdown_pt1()
1178 cell_defer_no_holder(tc, m->cell); in process_prepared_discard_passdown_pt1()
1186 dm_device_name(tc->pool->pool_md)); in process_prepared_discard_passdown_pt1()
1198 begin_discard(&op, tc, discard_parent); in process_prepared_discard_passdown_pt1()
1208 struct thin_c *tc = m->tc; in process_prepared_discard_passdown_pt2() local
1209 struct pool *pool = tc->pool; in process_prepared_discard_passdown_pt2()
1223 cell_defer_no_holder(tc, m->cell); in process_prepared_discard_passdown_pt2()
1290 static void ll_zero(struct thin_c *tc, struct dm_thin_new_mapping *m, in ll_zero() argument
1295 to.bdev = tc->pool_dev->bdev; in ll_zero()
1299 dm_kcopyd_zero(tc->pool->copier, 1, &to, 0, copy_complete, m); in ll_zero()
1302 static void remap_and_issue_overwrite(struct thin_c *tc, struct bio *bio, in remap_and_issue_overwrite() argument
1306 struct pool *pool = tc->pool; in remap_and_issue_overwrite()
1313 remap_and_issue(tc, bio, data_begin); in remap_and_issue_overwrite()
1319 static void schedule_copy(struct thin_c *tc, dm_block_t virt_block, in schedule_copy() argument
1325 struct pool *pool = tc->pool; in schedule_copy()
1328 m->tc = tc; in schedule_copy()
1351 remap_and_issue_overwrite(tc, bio, data_dest, m); in schedule_copy()
1359 to.bdev = tc->pool_dev->bdev; in schedule_copy()
1371 ll_zero(tc, m, in schedule_copy()
1380 static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block, in schedule_internal_copy() argument
1384 schedule_copy(tc, virt_block, tc->pool_dev, in schedule_internal_copy()
1386 tc->pool->sectors_per_block); in schedule_internal_copy()
1389 static void schedule_zero(struct thin_c *tc, dm_block_t virt_block, in schedule_zero() argument
1393 struct pool *pool = tc->pool; in schedule_zero()
1397 m->tc = tc; in schedule_zero()
1410 remap_and_issue_overwrite(tc, bio, data_block, m); in schedule_zero()
1412 ll_zero(tc, m, data_block * pool->sectors_per_block, in schedule_zero()
1418 static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block, in schedule_external_copy() argument
1422 struct pool *pool = tc->pool; in schedule_external_copy()
1426 if (virt_block_end <= tc->origin_size) in schedule_external_copy()
1427 schedule_copy(tc, virt_block, tc->origin_dev, in schedule_external_copy()
1431 else if (virt_block_begin < tc->origin_size) in schedule_external_copy()
1432 schedule_copy(tc, virt_block, tc->origin_dev, in schedule_external_copy()
1434 tc->origin_size - virt_block_begin); in schedule_external_copy()
1437 schedule_zero(tc, virt_block, data_dest, cell, bio); in schedule_external_copy()
1526 static int alloc_data_block(struct thin_c *tc, dm_block_t *result) in alloc_data_block() argument
1530 struct pool *pool = tc->pool; in alloc_data_block()
1596 struct thin_c *tc = h->tc; in retry_on_resume() local
1599 spin_lock_irqsave(&tc->lock, flags); in retry_on_resume()
1600 bio_list_add(&tc->retry_on_resume_list, bio); in retry_on_resume()
1601 spin_unlock_irqrestore(&tc->lock, flags); in retry_on_resume()
1658 static void process_discard_cell_no_passdown(struct thin_c *tc, in process_discard_cell_no_passdown() argument
1661 struct pool *pool = tc->pool; in process_discard_cell_no_passdown()
1668 m->tc = tc; in process_discard_cell_no_passdown()
1678 static void break_up_discard_bio(struct thin_c *tc, dm_block_t begin, dm_block_t end, in break_up_discard_bio() argument
1681 struct pool *pool = tc->pool; in break_up_discard_bio()
1696 r = dm_thin_find_mapped_range(tc->td, begin, end, &virt_begin, &virt_end, in break_up_discard_bio()
1705 build_key(tc->td, PHYSICAL, data_begin, data_begin + (virt_end - virt_begin), &data_key); in break_up_discard_bio()
1706 if (bio_detain(tc->pool, &data_key, NULL, &data_cell)) { in break_up_discard_bio()
1717 m->tc = tc; in break_up_discard_bio()
1741 static void process_discard_cell_passdown(struct thin_c *tc, struct dm_bio_prison_cell *virt_cell) in process_discard_cell_passdown() argument
1752 break_up_discard_bio(tc, virt_cell->key.block_begin, virt_cell->key.block_end, bio); in process_discard_cell_passdown()
1762 static void process_discard_bio(struct thin_c *tc, struct bio *bio) in process_discard_bio() argument
1768 get_bio_block_range(tc, bio, &begin, &end); in process_discard_bio()
1777 build_key(tc->td, VIRTUAL, begin, end, &virt_key); in process_discard_bio()
1778 if (bio_detain(tc->pool, &virt_key, bio, &virt_cell)) in process_discard_bio()
1788 tc->pool->process_discard_cell(tc, virt_cell); in process_discard_bio()
1791 static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block, in break_sharing() argument
1798 struct pool *pool = tc->pool; in break_sharing()
1800 r = alloc_data_block(tc, &data_block); in break_sharing()
1803 schedule_internal_copy(tc, block, lookup_result->block, in break_sharing()
1832 h->shared_read_entry = dm_deferred_entry_inc(info->tc->pool->shared_read_ds); in __remap_and_issue_shared_cell()
1833 inc_all_io_entry(info->tc->pool, bio); in __remap_and_issue_shared_cell()
1839 static void remap_and_issue_shared_cell(struct thin_c *tc, in remap_and_issue_shared_cell() argument
1846 info.tc = tc; in remap_and_issue_shared_cell()
1850 cell_visit_release(tc->pool, __remap_and_issue_shared_cell, in remap_and_issue_shared_cell()
1854 thin_defer_bio(tc, bio); in remap_and_issue_shared_cell()
1857 remap_and_issue(tc, bio, block); in remap_and_issue_shared_cell()
1860 static void process_shared_bio(struct thin_c *tc, struct bio *bio, in process_shared_bio() argument
1866 struct pool *pool = tc->pool; in process_shared_bio()
1873 build_data_key(tc->td, lookup_result->block, &key); in process_shared_bio()
1875 cell_defer_no_holder(tc, virt_cell); in process_shared_bio()
1880 break_sharing(tc, bio, block, &key, lookup_result, data_cell); in process_shared_bio()
1881 cell_defer_no_holder(tc, virt_cell); in process_shared_bio()
1887 remap_and_issue(tc, bio, lookup_result->block); in process_shared_bio()
1889 remap_and_issue_shared_cell(tc, data_cell, lookup_result->block); in process_shared_bio()
1890 remap_and_issue_shared_cell(tc, virt_cell, lookup_result->block); in process_shared_bio()
1894 static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block, in provision_block() argument
1899 struct pool *pool = tc->pool; in provision_block()
1906 cell_defer_no_holder(tc, cell); in provision_block()
1908 remap_and_issue(tc, bio, 0); in provision_block()
1917 cell_defer_no_holder(tc, cell); in provision_block()
1922 r = alloc_data_block(tc, &data_block); in provision_block()
1925 if (tc->origin_dev) in provision_block()
1926 schedule_external_copy(tc, block, data_block, cell, bio); in provision_block()
1928 schedule_zero(tc, block, data_block, cell, bio); in provision_block()
1943 static void process_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell) in process_cell() argument
1946 struct pool *pool = tc->pool; in process_cell()
1948 dm_block_t block = get_bio_block(tc, bio); in process_cell()
1951 if (tc->requeue_mode) { in process_cell()
1956 r = dm_thin_find_block(tc->td, block, 1, &lookup_result); in process_cell()
1960 process_shared_bio(tc, bio, block, &lookup_result, cell); in process_cell()
1963 remap_and_issue(tc, bio, lookup_result.block); in process_cell()
1964 inc_remap_and_issue_cell(tc, cell, lookup_result.block); in process_cell()
1969 if (bio_data_dir(bio) == READ && tc->origin_dev) { in process_cell()
1971 cell_defer_no_holder(tc, cell); in process_cell()
1973 if (bio_end_sector(bio) <= tc->origin_size) in process_cell()
1974 remap_to_origin_and_issue(tc, bio); in process_cell()
1976 else if (bio->bi_iter.bi_sector < tc->origin_size) { in process_cell()
1978 bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT; in process_cell()
1979 remap_to_origin_and_issue(tc, bio); in process_cell()
1986 provision_block(tc, bio, block, cell); in process_cell()
1992 cell_defer_no_holder(tc, cell); in process_cell()
1998 static void process_bio(struct thin_c *tc, struct bio *bio) in process_bio() argument
2000 struct pool *pool = tc->pool; in process_bio()
2001 dm_block_t block = get_bio_block(tc, bio); in process_bio()
2009 build_virtual_key(tc->td, block, &key); in process_bio()
2013 process_cell(tc, cell); in process_bio()
2016 static void __process_bio_read_only(struct thin_c *tc, struct bio *bio, in __process_bio_read_only() argument
2021 dm_block_t block = get_bio_block(tc, bio); in __process_bio_read_only()
2024 r = dm_thin_find_block(tc->td, block, 1, &lookup_result); in __process_bio_read_only()
2028 handle_unserviceable_bio(tc->pool, bio); in __process_bio_read_only()
2030 cell_defer_no_holder(tc, cell); in __process_bio_read_only()
2032 inc_all_io_entry(tc->pool, bio); in __process_bio_read_only()
2033 remap_and_issue(tc, bio, lookup_result.block); in __process_bio_read_only()
2035 inc_remap_and_issue_cell(tc, cell, lookup_result.block); in __process_bio_read_only()
2041 cell_defer_no_holder(tc, cell); in __process_bio_read_only()
2043 handle_unserviceable_bio(tc->pool, bio); in __process_bio_read_only()
2047 if (tc->origin_dev) { in __process_bio_read_only()
2048 inc_all_io_entry(tc->pool, bio); in __process_bio_read_only()
2049 remap_to_origin_and_issue(tc, bio); in __process_bio_read_only()
2061 cell_defer_no_holder(tc, cell); in __process_bio_read_only()
2067 static void process_bio_read_only(struct thin_c *tc, struct bio *bio) in process_bio_read_only() argument
2069 __process_bio_read_only(tc, bio, NULL); in process_bio_read_only()
2072 static void process_cell_read_only(struct thin_c *tc, struct dm_bio_prison_cell *cell) in process_cell_read_only() argument
2074 __process_bio_read_only(tc, cell->holder, cell); in process_cell_read_only()
2077 static void process_bio_success(struct thin_c *tc, struct bio *bio) in process_bio_success() argument
2082 static void process_bio_fail(struct thin_c *tc, struct bio *bio) in process_bio_fail() argument
2087 static void process_cell_success(struct thin_c *tc, struct dm_bio_prison_cell *cell) in process_cell_success() argument
2089 cell_success(tc->pool, cell); in process_cell_success()
2092 static void process_cell_fail(struct thin_c *tc, struct dm_bio_prison_cell *cell) in process_cell_fail() argument
2094 cell_error(tc->pool, cell); in process_cell_fail()
2110 static void __thin_bio_rb_add(struct thin_c *tc, struct bio *bio) in __thin_bio_rb_add() argument
2116 rbp = &tc->sort_bio_list.rb_node; in __thin_bio_rb_add()
2130 rb_insert_color(&pbd->rb_node, &tc->sort_bio_list); in __thin_bio_rb_add()
2133 static void __extract_sorted_bios(struct thin_c *tc) in __extract_sorted_bios() argument
2139 for (node = rb_first(&tc->sort_bio_list); node; node = rb_next(node)) { in __extract_sorted_bios()
2143 bio_list_add(&tc->deferred_bio_list, bio); in __extract_sorted_bios()
2144 rb_erase(&pbd->rb_node, &tc->sort_bio_list); in __extract_sorted_bios()
2147 WARN_ON(!RB_EMPTY_ROOT(&tc->sort_bio_list)); in __extract_sorted_bios()
2150 static void __sort_thin_deferred_bios(struct thin_c *tc) in __sort_thin_deferred_bios() argument
2156 bio_list_merge(&bios, &tc->deferred_bio_list); in __sort_thin_deferred_bios()
2157 bio_list_init(&tc->deferred_bio_list); in __sort_thin_deferred_bios()
2161 __thin_bio_rb_add(tc, bio); in __sort_thin_deferred_bios()
2168 __extract_sorted_bios(tc); in __sort_thin_deferred_bios()
2171 static void process_thin_deferred_bios(struct thin_c *tc) in process_thin_deferred_bios() argument
2173 struct pool *pool = tc->pool; in process_thin_deferred_bios()
2180 if (tc->requeue_mode) { in process_thin_deferred_bios()
2181 error_thin_bio_list(tc, &tc->deferred_bio_list, in process_thin_deferred_bios()
2188 spin_lock_irqsave(&tc->lock, flags); in process_thin_deferred_bios()
2190 if (bio_list_empty(&tc->deferred_bio_list)) { in process_thin_deferred_bios()
2191 spin_unlock_irqrestore(&tc->lock, flags); in process_thin_deferred_bios()
2195 __sort_thin_deferred_bios(tc); in process_thin_deferred_bios()
2197 bio_list_merge(&bios, &tc->deferred_bio_list); in process_thin_deferred_bios()
2198 bio_list_init(&tc->deferred_bio_list); in process_thin_deferred_bios()
2200 spin_unlock_irqrestore(&tc->lock, flags); in process_thin_deferred_bios()
2210 spin_lock_irqsave(&tc->lock, flags); in process_thin_deferred_bios()
2211 bio_list_add(&tc->deferred_bio_list, bio); in process_thin_deferred_bios()
2212 bio_list_merge(&tc->deferred_bio_list, &bios); in process_thin_deferred_bios()
2213 spin_unlock_irqrestore(&tc->lock, flags); in process_thin_deferred_bios()
2218 pool->process_discard(tc, bio); in process_thin_deferred_bios()
2220 pool->process_bio(tc, bio); in process_thin_deferred_bios()
2265 static void process_thin_deferred_cells(struct thin_c *tc) in process_thin_deferred_cells() argument
2267 struct pool *pool = tc->pool; in process_thin_deferred_cells()
2275 spin_lock_irqsave(&tc->lock, flags); in process_thin_deferred_cells()
2276 list_splice_init(&tc->deferred_cells, &cells); in process_thin_deferred_cells()
2277 spin_unlock_irqrestore(&tc->lock, flags); in process_thin_deferred_cells()
2283 count = sort_cells(tc->pool, &cells); in process_thin_deferred_cells()
2298 spin_lock_irqsave(&tc->lock, flags); in process_thin_deferred_cells()
2299 list_splice(&cells, &tc->deferred_cells); in process_thin_deferred_cells()
2300 spin_unlock_irqrestore(&tc->lock, flags); in process_thin_deferred_cells()
2305 pool->process_discard_cell(tc, cell); in process_thin_deferred_cells()
2307 pool->process_cell(tc, cell); in process_thin_deferred_cells()
2312 static void thin_get(struct thin_c *tc);
2313 static void thin_put(struct thin_c *tc);
2322 struct thin_c *tc = NULL; in get_first_thin() local
2326 tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list); in get_first_thin()
2327 thin_get(tc); in get_first_thin()
2331 return tc; in get_first_thin()
2334 static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc) in get_next_thin() argument
2336 struct thin_c *old_tc = tc; in get_next_thin()
2339 list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) { in get_next_thin()
2340 thin_get(tc); in get_next_thin()
2343 return tc; in get_next_thin()
2356 struct thin_c *tc; in process_deferred_bios() local
2358 tc = get_first_thin(pool); in process_deferred_bios()
2359 while (tc) { in process_deferred_bios()
2360 process_thin_deferred_cells(tc); in process_deferred_bios()
2361 process_thin_deferred_bios(tc); in process_deferred_bios()
2362 tc = get_next_thin(pool, tc); in process_deferred_bios()
2483 struct thin_c *tc; member
2494 w->tc->requeue_mode = true; in do_noflush_start()
2495 requeue_io(w->tc); in do_noflush_start()
2502 w->tc->requeue_mode = false; in do_noflush_stop()
2506 static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *)) in noflush_work() argument
2510 w.tc = tc; in noflush_work()
2511 pool_work_wait(&w.pw, tc->pool, fn); in noflush_work()
2667 static void thin_defer_bio(struct thin_c *tc, struct bio *bio) in thin_defer_bio() argument
2670 struct pool *pool = tc->pool; in thin_defer_bio()
2672 spin_lock_irqsave(&tc->lock, flags); in thin_defer_bio()
2673 bio_list_add(&tc->deferred_bio_list, bio); in thin_defer_bio()
2674 spin_unlock_irqrestore(&tc->lock, flags); in thin_defer_bio()
2679 static void thin_defer_bio_with_throttle(struct thin_c *tc, struct bio *bio) in thin_defer_bio_with_throttle() argument
2681 struct pool *pool = tc->pool; in thin_defer_bio_with_throttle()
2684 thin_defer_bio(tc, bio); in thin_defer_bio_with_throttle()
2688 static void thin_defer_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell) in thin_defer_cell() argument
2691 struct pool *pool = tc->pool; in thin_defer_cell()
2694 spin_lock_irqsave(&tc->lock, flags); in thin_defer_cell()
2695 list_add_tail(&cell->user_list, &tc->deferred_cells); in thin_defer_cell()
2696 spin_unlock_irqrestore(&tc->lock, flags); in thin_defer_cell()
2702 static void thin_hook_bio(struct thin_c *tc, struct bio *bio) in thin_hook_bio() argument
2706 h->tc = tc; in thin_hook_bio()
2719 struct thin_c *tc = ti->private; in thin_bio_map() local
2720 dm_block_t block = get_bio_block(tc, bio); in thin_bio_map()
2721 struct dm_thin_device *td = tc->td; in thin_bio_map()
2726 thin_hook_bio(tc, bio); in thin_bio_map()
2728 if (tc->requeue_mode) { in thin_bio_map()
2734 if (get_pool_mode(tc->pool) == PM_FAIL) { in thin_bio_map()
2740 thin_defer_bio_with_throttle(tc, bio); in thin_bio_map()
2748 build_virtual_key(tc->td, block, &key); in thin_bio_map()
2749 if (bio_detain(tc->pool, &key, bio, &virt_cell)) in thin_bio_map()
2774 thin_defer_cell(tc, virt_cell); in thin_bio_map()
2778 build_data_key(tc->td, result.block, &key); in thin_bio_map()
2779 if (bio_detain(tc->pool, &key, bio, &data_cell)) { in thin_bio_map()
2780 cell_defer_no_holder(tc, virt_cell); in thin_bio_map()
2784 inc_all_io_entry(tc->pool, bio); in thin_bio_map()
2785 cell_defer_no_holder(tc, data_cell); in thin_bio_map()
2786 cell_defer_no_holder(tc, virt_cell); in thin_bio_map()
2788 remap(tc, bio, result.block); in thin_bio_map()
2793 thin_defer_cell(tc, virt_cell); in thin_bio_map()
2803 cell_defer_no_holder(tc, virt_cell); in thin_bio_map()
2823 struct thin_c *tc; in requeue_bios() local
2826 list_for_each_entry_rcu(tc, &pool->active_thins, list) { in requeue_bios()
2827 spin_lock_irqsave(&tc->lock, flags); in requeue_bios()
2828 bio_list_merge(&tc->deferred_bio_list, &tc->retry_on_resume_list); in requeue_bios()
2829 bio_list_init(&tc->retry_on_resume_list); in requeue_bios()
2830 spin_unlock_irqrestore(&tc->lock, flags); in requeue_bios()
3606 struct thin_c *tc; in pool_suspend_active_thins() local
3609 tc = get_first_thin(pool); in pool_suspend_active_thins()
3610 while (tc) { in pool_suspend_active_thins()
3611 dm_internal_suspend_noflush(tc->thin_md); in pool_suspend_active_thins()
3612 tc = get_next_thin(pool, tc); in pool_suspend_active_thins()
3618 struct thin_c *tc; in pool_resume_active_thins() local
3621 tc = get_first_thin(pool); in pool_resume_active_thins()
3622 while (tc) { in pool_resume_active_thins()
3623 dm_internal_resume(tc->thin_md); in pool_resume_active_thins()
3624 tc = get_next_thin(pool, tc); in pool_resume_active_thins()
4137 static void thin_get(struct thin_c *tc) in thin_get() argument
4139 refcount_inc(&tc->refcount); in thin_get()
4142 static void thin_put(struct thin_c *tc) in thin_put() argument
4144 if (refcount_dec_and_test(&tc->refcount)) in thin_put()
4145 complete(&tc->can_destroy); in thin_put()
4150 struct thin_c *tc = ti->private; in thin_dtr() local
4153 spin_lock_irqsave(&tc->pool->lock, flags); in thin_dtr()
4154 list_del_rcu(&tc->list); in thin_dtr()
4155 spin_unlock_irqrestore(&tc->pool->lock, flags); in thin_dtr()
4158 thin_put(tc); in thin_dtr()
4159 wait_for_completion(&tc->can_destroy); in thin_dtr()
4163 __pool_dec(tc->pool); in thin_dtr()
4164 dm_pool_close_thin_device(tc->td); in thin_dtr()
4165 dm_put_device(ti, tc->pool_dev); in thin_dtr()
4166 if (tc->origin_dev) in thin_dtr()
4167 dm_put_device(ti, tc->origin_dev); in thin_dtr()
4168 kfree(tc); in thin_dtr()
4188 struct thin_c *tc; in thin_ctr() local
4201 tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL); in thin_ctr()
4202 if (!tc) { in thin_ctr()
4207 tc->thin_md = dm_table_get_md(ti->table); in thin_ctr()
4208 spin_lock_init(&tc->lock); in thin_ctr()
4209 INIT_LIST_HEAD(&tc->deferred_cells); in thin_ctr()
4210 bio_list_init(&tc->deferred_bio_list); in thin_ctr()
4211 bio_list_init(&tc->retry_on_resume_list); in thin_ctr()
4212 tc->sort_bio_list = RB_ROOT; in thin_ctr()
4226 tc->origin_dev = origin_dev; in thin_ctr()
4234 tc->pool_dev = pool_dev; in thin_ctr()
4236 if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) { in thin_ctr()
4242 pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev); in thin_ctr()
4249 tc->pool = __pool_table_lookup(pool_md); in thin_ctr()
4250 if (!tc->pool) { in thin_ctr()
4255 __pool_inc(tc->pool); in thin_ctr()
4257 if (get_pool_mode(tc->pool) == PM_FAIL) { in thin_ctr()
4263 r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td); in thin_ctr()
4269 r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block); in thin_ctr()
4278 if (tc->pool->pf.discard_enabled) { in thin_ctr()
4285 spin_lock_irqsave(&tc->pool->lock, flags); in thin_ctr()
4286 if (tc->pool->suspended) { in thin_ctr()
4287 spin_unlock_irqrestore(&tc->pool->lock, flags); in thin_ctr()
4293 refcount_set(&tc->refcount, 1); in thin_ctr()
4294 init_completion(&tc->can_destroy); in thin_ctr()
4295 list_add_tail_rcu(&tc->list, &tc->pool->active_thins); in thin_ctr()
4296 spin_unlock_irqrestore(&tc->pool->lock, flags); in thin_ctr()
4310 dm_pool_close_thin_device(tc->td); in thin_ctr()
4312 __pool_dec(tc->pool); in thin_ctr()
4316 dm_put_device(ti, tc->pool_dev); in thin_ctr()
4318 if (tc->origin_dev) in thin_ctr()
4319 dm_put_device(ti, tc->origin_dev); in thin_ctr()
4321 kfree(tc); in thin_ctr()
4342 struct pool *pool = h->tc->pool; in thin_endio()
4369 cell_defer_no_holder(h->tc, h->cell); in thin_endio()
4376 struct thin_c *tc = ti->private; in thin_presuspend() local
4379 noflush_work(tc, do_noflush_start); in thin_presuspend()
4384 struct thin_c *tc = ti->private; in thin_postsuspend() local
4390 noflush_work(tc, do_noflush_stop); in thin_postsuspend()
4395 struct thin_c *tc = ti->private; in thin_preresume() local
4397 if (tc->origin_dev) in thin_preresume()
4398 tc->origin_size = get_dev_size(tc->origin_dev->bdev); in thin_preresume()
4413 struct thin_c *tc = ti->private; in thin_status() local
4415 if (get_pool_mode(tc->pool) == PM_FAIL) { in thin_status()
4420 if (!tc->td) in thin_status()
4425 r = dm_thin_get_mapped_count(tc->td, &mapped); in thin_status()
4431 r = dm_thin_get_highest_mapped_block(tc->td, &highest); in thin_status()
4437 DMEMIT("%llu ", mapped * tc->pool->sectors_per_block); in thin_status()
4440 tc->pool->sectors_per_block) - 1); in thin_status()
4447 format_dev_t(buf, tc->pool_dev->bdev->bd_dev), in thin_status()
4448 (unsigned long) tc->dev_id); in thin_status()
4449 if (tc->origin_dev) in thin_status()
4450 DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev)); in thin_status()
4465 struct thin_c *tc = ti->private; in thin_iterate_devices() local
4466 struct pool *pool = tc->pool; in thin_iterate_devices()
4478 return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data); in thin_iterate_devices()
4485 struct thin_c *tc = ti->private; in thin_io_hints() local
4486 struct pool *pool = tc->pool; in thin_io_hints()
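
Note on the pattern above: the process_bio_fn and process_cell_fn typedefs (source lines 223-224), together with the pool->process_discard(tc, bio), pool->process_bio(tc, bio), pool->process_discard_cell(tc, cell) and pool->process_cell(tc, cell) call sites (source lines 2218-2220, 1788, 2305-2307), point at a mode-dependent dispatch table held in the pool. Below is a minimal, self-contained userspace sketch of that pattern only; the struct layouts, the "failed" flag, the handler bodies and install_handlers() are simplified stand-ins for illustration, not the kernel's actual definitions.

/*
 * Sketch of the function-pointer dispatch implied by the process_bio_fn
 * typedef and the indirect pool->process_* calls in the listing above.
 * Everything here is a simplified stand-in, not the driver's real code.
 */
#include <stdio.h>

struct pool;
struct bio { int dummy; };                 /* stand-in for the block-layer bio */
struct thin_c { struct pool *pool; };      /* stand-in per-thin-device context */

typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);

struct pool {
	/* handlers swapped whenever the pool changes mode */
	process_bio_fn process_bio;
	process_bio_fn process_discard;
};

static void process_bio_normal(struct thin_c *tc, struct bio *bio)
{
	(void)tc; (void)bio;
	puts("normal read/write path");
}

static void process_bio_error(struct thin_c *tc, struct bio *bio)
{
	(void)tc; (void)bio;
	puts("erroring bio: pool is degraded");
}

/* analogue of installing mode handlers (as set_pool_mode() does in the driver) */
static void install_handlers(struct pool *pool, int failed)
{
	pool->process_bio     = failed ? process_bio_error : process_bio_normal;
	pool->process_discard = pool->process_bio;
}

int main(void)
{
	struct pool pool;
	struct bio bio;
	struct thin_c tc = { .pool = &pool };

	install_handlers(&pool, 0);
	tc.pool->process_bio(&tc, &bio);       /* dispatch as in process_thin_deferred_bios() */

	install_handlers(&pool, 1);
	tc.pool->process_bio(&tc, &bio);
	return 0;
}

Installing the handlers once per mode change keeps the hot paths free of per-bio mode branching, which is consistent with the listing showing only indirect pool->process_* calls (and mode-specific handlers such as process_bio_read_only, process_bio_fail and process_cell_success) rather than per-call switches on the pool state.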