Lines matching refs: ti — a cross-reference listing of the struct dm_target pointer ti in the device-mapper table code. Each entry reads: <source line number> <code excerpt> in <enclosing function>.
229 static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, in device_area_is_invalid() argument
246 dm_device_name(ti->table->md), bdevname(bdev, b), in device_area_is_invalid()
262 dm_device_name(ti->table->md), in device_area_is_invalid()
279 dm_device_name(ti->table->md), in device_area_is_invalid()
292 dm_device_name(ti->table->md), in device_area_is_invalid()
301 dm_device_name(ti->table->md), in device_area_is_invalid()
352 int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode, in dm_get_device() argument
360 struct dm_table *t = ti->table; in dm_get_device()
402 static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, in dm_set_device_limits() argument
412 dm_device_name(ti->table->md), bdevname(bdev, b)); in dm_set_device_limits()
421 dm_device_name(ti->table->md), bdevname(bdev, b), in dm_set_device_limits()
432 void dm_put_device(struct dm_target *ti, struct dm_dev *d) in dm_put_device() argument
435 struct list_head *devices = &ti->table->devices; in dm_put_device()
446 dm_device_name(ti->table->md), d->name); in dm_put_device()
450 dm_put_table_device(ti->table->md, d); in dm_put_device()
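The dm_get_device()/dm_put_device() pair listed above is normally driven from a target type's constructor and destructor: the constructor takes a reference on the backing device and stashes it in ti->private, and the destructor drops it. A minimal sketch of that pairing, assuming a hypothetical single-device target (example_ctr, example_dtr and struct example_ctx are illustrative names, not part of the listed code):

#include <linux/device-mapper.h>
#include <linux/module.h>
#include <linux/slab.h>

/* Hypothetical per-target context; only the backing device is tracked. */
struct example_ctx {
	struct dm_dev *dev;
};

/* Constructor: argv[0] is the backing device path. */
static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct example_ctx *ec;
	int r;

	if (argc != 1) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	ec = kzalloc(sizeof(*ec), GFP_KERNEL);
	if (!ec) {
		ti->error = "Cannot allocate context";
		return -ENOMEM;
	}

	/* Takes a counted reference on the device for the lifetime of the table. */
	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ec->dev);
	if (r) {
		ti->error = "Device lookup failed";
		kfree(ec);
		return r;
	}

	ti->private = ec;
	return 0;
}

/* Destructor: drop the reference taken by dm_get_device(). */
static void example_dtr(struct dm_target *ti)
{
	struct example_ctx *ec = ti->private;

	dm_put_device(ti, ec->dev);
	kfree(ec);
}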
460 static int adjoin(struct dm_table *table, struct dm_target *ti) in adjoin() argument
465 return !ti->begin; in adjoin()
468 return (ti->begin == (prev->begin + prev->len)); in adjoin()
595 struct dm_target *ti; in validate_hardware_logical_block_alignment() local
603 ti = dm_table_get_target(table, i); in validate_hardware_logical_block_alignment()
608 if (ti->type->iterate_devices) in validate_hardware_logical_block_alignment()
609 ti->type->iterate_devices(ti, dm_set_device_limits, in validate_hardware_logical_block_alignment()
616 if (remaining < ti->len && in validate_hardware_logical_block_alignment()
622 (unsigned short) ((next_target_start + ti->len) & in validate_hardware_logical_block_alignment()
632 (unsigned long long) ti->begin, in validate_hardware_logical_block_alignment()
633 (unsigned long long) ti->len, in validate_hardware_logical_block_alignment()
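For context: validate_hardware_logical_block_alignment() works modulo the table-wide logical block size, expressed in 512-byte sectors. If one target ends mid logical block, the remaining sectors of that block spill into the next target and must be addressable at that target's own logical block size; a table whose final target still leaves a partial block is rejected with the warning whose arguments (ti->begin, ti->len) appear above.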
814 int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev, in device_not_dax_capable() argument
823 static int device_not_dax_synchronous_capable(struct dm_target *ti, struct dm_dev *dev, in device_not_dax_synchronous_capable() argument
832 struct dm_target *ti; in dm_table_supports_dax() local
837 ti = dm_table_get_target(t, i); in dm_table_supports_dax()
839 if (!ti->type->direct_access) in dm_table_supports_dax()
842 if (!ti->type->iterate_devices || in dm_table_supports_dax()
843 ti->type->iterate_devices(ti, iterate_fn, blocksize)) in dm_table_supports_dax()
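Most table-wide checks in this listing share one shape: dm-table walks every target and, when the target type implements .iterate_devices, asks it to run a callout on each underlying device. A minimal sketch of both halves of that contract, assuming a single-device target; example_iterate_devices, device_not_example_capable, example_table_supports_cap and struct example_ctx are hypothetical names:

#include <linux/device-mapper.h>
#include <linux/blkdev.h>

struct example_ctx {			/* same hypothetical context as earlier */
	struct dm_dev *dev;
};

/* Target side: forward the callout to the one underlying device,
 * covering the whole target (offset 0, length ti->len). */
static int example_iterate_devices(struct dm_target *ti,
				   iterate_devices_callout_fn fn, void *data)
{
	struct example_ctx *ec = ti->private;

	return fn(ti, ec->dev, 0, ti->len, data);
}

/* Callout side: a "device is NOT capable" predicate.  Returning non-zero
 * for any device makes the table-level check below fail. */
static int device_not_example_capable(struct dm_target *ti, struct dm_dev *dev,
				      sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	/* The real callouts in this listing test a specific queue flag here. */
	return !q;
}

/* Table side: the idiom used by dm_table_supports_flush() and friends. */
static bool example_table_supports_cap(struct dm_table *t)
{
	struct dm_target *ti;
	unsigned int i;

	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		ti = dm_table_get_target(t, i);

		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, device_not_example_capable, NULL))
			return false;
	}

	return true;
}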
850 static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev, in device_is_rq_stackable() argument
989 struct dm_target *ti; in dm_table_get_wildcard_target() local
993 ti = dm_table_get_target(t, i); in dm_table_get_wildcard_target()
994 if (dm_target_is_wildcard(ti->type)) in dm_table_get_wildcard_target()
995 return ti; in dm_table_get_wildcard_target()
1016 struct dm_target *ti; in dm_table_alloc_md_mempools() local
1026 ti = t->targets + i; in dm_table_alloc_md_mempools()
1027 per_io_data_size = max(per_io_data_size, ti->per_io_data_size); in dm_table_alloc_md_mempools()
1028 min_pool_size = max(min_pool_size, ti->num_flush_bios); in dm_table_alloc_md_mempools()
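dm_table_alloc_md_mempools() sizes the table's mempools from the largest ti->per_io_data_size and ti->num_flush_bios that any target declares. Both fields are set by the target's constructor; a short hedged sketch of the relevant lines (example_pool_ctr and struct example_per_bio_data are hypothetical, continuing the earlier example):

#include <linux/device-mapper.h>

/* Hypothetical per-bio scratch area, later retrieved with dm_per_bio_data(). */
struct example_per_bio_data {
	sector_t orig_sector;
};

static int example_pool_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	/* ... device lookup as in the constructor sketch above ... */

	/* These feed the maxima taken in dm_table_alloc_md_mempools(). */
	ti->num_flush_bios = 1;
	ti->per_io_data_size = sizeof(struct example_per_bio_data);

	return 0;
}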
1115 struct dm_target *ti = dm_table_get_target(t, i); in dm_table_get_integrity_disk() local
1116 if (!dm_target_passes_integrity(ti->type)) in dm_table_get_integrity_disk()
1199 static int dm_keyslot_evict_callback(struct dm_target *ti, struct dm_dev *dev, in dm_keyslot_evict_callback() argument
1222 struct dm_target *ti; in dm_keyslot_evict() local
1228 ti = dm_table_get_target(t, i); in dm_keyslot_evict()
1229 if (!ti->type->iterate_devices) in dm_keyslot_evict()
1231 ti->type->iterate_devices(ti, dm_keyslot_evict_callback, in dm_keyslot_evict()
1246 static int dm_derive_raw_secret_callback(struct dm_target *ti, in dm_derive_raw_secret_callback() argument
1293 struct dm_target *ti; in dm_derive_raw_secret() local
1299 ti = dm_table_get_target(t, i); in dm_derive_raw_secret()
1300 if (!ti->type->iterate_devices) in dm_derive_raw_secret()
1302 ti->type->iterate_devices(ti, dm_derive_raw_secret_callback, in dm_derive_raw_secret()
1317 static int device_intersect_crypto_modes(struct dm_target *ti, in device_intersect_crypto_modes() argument
1362 struct dm_target *ti; in dm_table_construct_keyslot_manager() local
1381 ti = dm_table_get_target(t, i); in dm_table_construct_keyslot_manager()
1383 if (!dm_target_passes_crypto(ti->type)) { in dm_table_construct_keyslot_manager()
1387 if (!ti->type->iterate_devices) in dm_table_construct_keyslot_manager()
1389 ti->type->iterate_devices(ti, device_intersect_crypto_modes, in dm_table_construct_keyslot_manager()
1588 struct dm_target *ti; in dm_table_any_dev_attr() local
1592 ti = dm_table_get_target(t, i); in dm_table_any_dev_attr()
1594 if (ti->type->iterate_devices && in dm_table_any_dev_attr()
1595 ti->type->iterate_devices(ti, func, data)) in dm_table_any_dev_attr()
1602 static int count_device(struct dm_target *ti, struct dm_dev *dev, in count_device() argument
1620 struct dm_target *ti; in dm_table_has_no_data_devices() local
1624 ti = dm_table_get_target(table, i); in dm_table_has_no_data_devices()
1626 if (!ti->type->iterate_devices) in dm_table_has_no_data_devices()
1630 ti->type->iterate_devices(ti, count_device, &num_devices); in dm_table_has_no_data_devices()
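count_device() illustrates the other use of the callout's void *data argument: accumulating state across devices instead of returning a pass/fail verdict. A hedged sketch of that accumulation pattern (example_count_device is a hypothetical name mirroring the real helper):

#include <linux/device-mapper.h>

/* Callout that counts how many underlying devices a target spans. */
static int example_count_device(struct dm_target *ti, struct dm_dev *dev,
				sector_t start, sector_t len, void *data)
{
	unsigned int *num_devices = data;

	(*num_devices)++;
	return 0;		/* always continue the walk */
}

/* Invoked per target, as dm_table_has_no_data_devices() does above:
 *	unsigned int num_devices = 0;
 *	ti->type->iterate_devices(ti, example_count_device, &num_devices);
 */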
1638 static int device_not_zoned_model(struct dm_target *ti, struct dm_dev *dev, in device_not_zoned_model() argument
1657 struct dm_target *ti; in dm_table_supports_zoned_model() local
1661 ti = dm_table_get_target(t, i); in dm_table_supports_zoned_model()
1663 if (dm_target_supports_zoned_hm(ti->type)) { in dm_table_supports_zoned_model()
1664 if (!ti->type->iterate_devices || in dm_table_supports_zoned_model()
1665 ti->type->iterate_devices(ti, device_not_zoned_model, in dm_table_supports_zoned_model()
1668 } else if (!dm_target_supports_mixed_zoned_model(ti->type)) { in dm_table_supports_zoned_model()
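dm_target_supports_zoned_hm() and dm_target_supports_mixed_zoned_model() test feature flags declared statically on the target type rather than anything probed per device. A hedged sketch of how a pass-through target for host-managed zoned devices would declare that (all example_* names are hypothetical; only the .features line matters here):

#include <linux/device-mapper.h>
#include <linux/module.h>

static struct target_type example_zoned_target = {
	.name		= "example-zoned",
	.version	= {1, 0, 0},
	.features	= DM_TARGET_ZONED_HM,	/* host-managed zoned support */
	.module		= THIS_MODULE,
	/* .ctr, .dtr, .map, .iterate_devices omitted for brevity */
};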
1677 static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev, in device_not_matches_zone_sectors() argument
1726 struct dm_target *ti; in dm_calculate_queue_limits() local
1737 ti = dm_table_get_target(table, i); in dm_calculate_queue_limits()
1739 if (!ti->type->iterate_devices) in dm_calculate_queue_limits()
1745 ti->type->iterate_devices(ti, dm_set_device_limits, in dm_calculate_queue_limits()
1758 if (ti->type->io_hints) in dm_calculate_queue_limits()
1759 ti->type->io_hints(ti, &ti_limits); in dm_calculate_queue_limits()
1765 if (ti->type->iterate_devices(ti, device_area_is_invalid, in dm_calculate_queue_limits()
1779 (unsigned long long) ti->begin, in dm_calculate_queue_limits()
1780 (unsigned long long) ti->len); in dm_calculate_queue_limits()
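dm_calculate_queue_limits() builds each target's limits by running dm_set_device_limits over its devices, then lets the target tighten them through .io_hints, and finally re-checks the mapped area with device_area_is_invalid. On the target side, an .io_hints hook typically just advertises the target's own granularity; a hedged sketch (example_io_hints and the 64 KiB chunk size are made-up values):

#include <linux/device-mapper.h>
#include <linux/blkdev.h>

#define EXAMPLE_CHUNK_SECTORS	128	/* hypothetical 64 KiB chunk */

/* Called from dm_calculate_queue_limits() after the per-device limits
 * for this target have been stacked into ti_limits. */
static void example_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	/* Report the chunk granularity (in bytes) to upper layers. */
	blk_limits_io_min(limits, EXAMPLE_CHUNK_SECTORS << SECTOR_SHIFT);
	blk_limits_io_opt(limits, EXAMPLE_CHUNK_SECTORS << SECTOR_SHIFT);
}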
1834 static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev, in device_flush_capable() argument
1845 struct dm_target *ti; in dm_table_supports_flush() local
1855 ti = dm_table_get_target(t, i); in dm_table_supports_flush()
1857 if (!ti->num_flush_bios) in dm_table_supports_flush()
1860 if (ti->flush_supported) in dm_table_supports_flush()
1863 if (ti->type->iterate_devices && in dm_table_supports_flush()
1864 ti->type->iterate_devices(ti, device_flush_capable, (void *) flush)) in dm_table_supports_flush()
1871 static int device_dax_write_cache_enabled(struct dm_target *ti, in device_dax_write_cache_enabled() argument
1885 static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev, in device_is_rotational() argument
1893 static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev, in device_is_not_random() argument
1901 static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev, in device_not_write_same_capable() argument
1911 struct dm_target *ti; in dm_table_supports_write_same() local
1915 ti = dm_table_get_target(t, i); in dm_table_supports_write_same()
1917 if (!ti->num_write_same_bios) in dm_table_supports_write_same()
1920 if (!ti->type->iterate_devices || in dm_table_supports_write_same()
1921 ti->type->iterate_devices(ti, device_not_write_same_capable, NULL)) in dm_table_supports_write_same()
1928 static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev *dev, in device_not_write_zeroes_capable() argument
1938 struct dm_target *ti; in dm_table_supports_write_zeroes() local
1942 ti = dm_table_get_target(t, i++); in dm_table_supports_write_zeroes()
1944 if (!ti->num_write_zeroes_bios) in dm_table_supports_write_zeroes()
1947 if (!ti->type->iterate_devices || in dm_table_supports_write_zeroes()
1948 ti->type->iterate_devices(ti, device_not_write_zeroes_capable, NULL)) in dm_table_supports_write_zeroes()
1955 static int device_not_nowait_capable(struct dm_target *ti, struct dm_dev *dev, in device_not_nowait_capable() argument
1965 struct dm_target *ti; in dm_table_supports_nowait() local
1969 ti = dm_table_get_target(t, i++); in dm_table_supports_nowait()
1971 if (!dm_target_supports_nowait(ti->type)) in dm_table_supports_nowait()
1974 if (!ti->type->iterate_devices || in dm_table_supports_nowait()
1975 ti->type->iterate_devices(ti, device_not_nowait_capable, NULL)) in dm_table_supports_nowait()
1982 static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev, in device_not_discard_capable() argument
1992 struct dm_target *ti; in dm_table_supports_discards() local
1996 ti = dm_table_get_target(t, i); in dm_table_supports_discards()
1998 if (!ti->num_discard_bios) in dm_table_supports_discards()
2006 if (!ti->discards_supported && in dm_table_supports_discards()
2007 (!ti->type->iterate_devices || in dm_table_supports_discards()
2008 ti->type->iterate_devices(ti, device_not_discard_capable, NULL))) in dm_table_supports_discards()
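dm_table_supports_discards() requires every target to have opted in via ti->num_discard_bios, and then either the target claims discard handling itself (ti->discards_supported) or all of its devices must advertise discard support. The opt-in happens in the constructor; a hedged constructor excerpt (example_discard_ctr is a hypothetical name):

#include <linux/device-mapper.h>

static int example_discard_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	/* Opt in: allow up to one discard clone per underlying device. */
	ti->num_discard_bios = 1;

	/*
	 * Optional: claim discard support even when the underlying data
	 * devices lack it, for targets that complete discards internally.
	 */
	ti->discards_supported = true;

	return 0;
}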
2015 static int device_not_secure_erase_capable(struct dm_target *ti, in device_not_secure_erase_capable() argument
2026 struct dm_target *ti; in dm_table_supports_secure_erase() local
2030 ti = dm_table_get_target(t, i); in dm_table_supports_secure_erase()
2032 if (!ti->num_secure_erase_bios) in dm_table_supports_secure_erase()
2035 if (!ti->type->iterate_devices || in dm_table_supports_secure_erase()
2036 ti->type->iterate_devices(ti, device_not_secure_erase_capable, NULL)) in dm_table_supports_secure_erase()
2043 static int device_requires_stable_pages(struct dm_target *ti, in device_requires_stable_pages() argument
2177 struct dm_target *ti = t->targets; in suspend_targets() local
2184 if (ti->type->presuspend) in suspend_targets()
2185 ti->type->presuspend(ti); in suspend_targets()
2188 if (ti->type->presuspend_undo) in suspend_targets()
2189 ti->type->presuspend_undo(ti); in suspend_targets()
2192 if (ti->type->postsuspend) in suspend_targets()
2193 ti->type->postsuspend(ti); in suspend_targets()
2196 ti++; in suspend_targets()
2231 struct dm_target *ti = t->targets + i; in dm_table_resume_targets() local
2233 if (!ti->type->preresume) in dm_table_resume_targets()
2236 r = ti->type->preresume(ti); in dm_table_resume_targets()
2239 dm_device_name(t->md), ti->type->name, r); in dm_table_resume_targets()
2245 struct dm_target *ti = t->targets + i; in dm_table_resume_targets() local
2247 if (ti->type->resume) in dm_table_resume_targets()
2248 ti->type->resume(ti); in dm_table_resume_targets()
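suspend_targets() above walks the target array and calls presuspend, presuspend_undo, or postsuspend on each target depending on the suspend phase, while dm_table_resume_targets() calls preresume and then resume, with a failing preresume aborting the resume. A hedged sketch of a target type wiring up those optional hooks (all example_* names are hypothetical; .ctr/.dtr/.map omitted):

#include <linux/device-mapper.h>
#include <linux/module.h>

static void example_presuspend(struct dm_target *ti)
{
	/* Stop generating new I/O before the device is quiesced. */
}

static void example_postsuspend(struct dm_target *ti)
{
	/* All in-flight I/O has drained; persist metadata here. */
}

static int example_preresume(struct dm_target *ti)
{
	/* May fail: a non-zero return aborts the table resume (see above). */
	return 0;
}

static void example_resume(struct dm_target *ti)
{
	/* Start accepting I/O again. */
}

static struct target_type example_target = {
	.name		= "example",
	.version	= {1, 0, 0},
	.module		= THIS_MODULE,
	.presuspend	= example_presuspend,
	.postsuspend	= example_postsuspend,
	.preresume	= example_preresume,
	.resume		= example_resume,
	/* .ctr, .dtr and .map omitted; see the constructor sketch earlier */
};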