Lines matching refs: mapped_device (references to struct mapped_device in drivers/md/dm.c; a usage sketch of the live-table accessors follows the listing)

97 	struct mapped_device *md;
338 int dm_deleting_md(struct mapped_device *md) in dm_deleting_md()
345 struct mapped_device *md; in dm_blk_open()
369 struct mapped_device *md; in dm_blk_close()
386 int dm_open_count(struct mapped_device *md) in dm_open_count()
394 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred) in dm_lock_for_deletion()
414 int dm_cancel_deferred_remove(struct mapped_device *md) in dm_cancel_deferred_remove()
437 struct mapped_device *md = bdev->bd_disk->private_data; in dm_blk_getgeo()
476 struct mapped_device *md = disk->private_data; in dm_blk_report_zones()
520 static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx, in dm_prepare_ioctl()
554 static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx) in dm_unprepare_ioctl()
562 struct mapped_device *md = bdev->bd_disk->private_data; in dm_blk_ioctl()
600 struct mapped_device *md = io->md; in start_io_acct()
612 struct mapped_device *md = io->md; in end_io_acct()
628 static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) in alloc_io()
655 static void free_io(struct mapped_device *md, struct dm_io *io) in free_io()
695 static void queue_io(struct mapped_device *md, struct bio *bio) in queue_io()
710 struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier) in dm_get_live_table()
717 void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier) in dm_put_live_table()
722 void dm_sync_table(struct mapped_device *md) in dm_sync_table()
732 static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU) in dm_get_live_table_fast()
738 static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU) in dm_put_live_table_fast()
749 struct mapped_device *md) in open_table_device()
775 static void close_table_device(struct table_device *td, struct mapped_device *md) in close_table_device()
799 int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode, in dm_get_table_device()
837 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d) in dm_put_table_device()
867 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo) in dm_get_geometry()
877 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo) in dm_set_geometry()
891 static int __noflush_suspending(struct mapped_device *md) in __noflush_suspending()
905 struct mapped_device *md = io->md; in dec_pending()
954 void disable_discard(struct mapped_device *md) in disable_discard()
963 void disable_write_same(struct mapped_device *md) in disable_write_same()
971 void disable_write_zeroes(struct mapped_device *md) in disable_write_zeroes()
989 struct mapped_device *md = tio->io->md; in clone_endio()
1035 struct mapped_device *md = io->md; in clone_endio()
1091 static struct dm_target *dm_dax_get_live_target(struct mapped_device *md, in dm_dax_get_live_target()
1112 struct mapped_device *md = dax_get_private(dax_dev); in dm_dax_direct_access()
1139 struct mapped_device *md = dax_get_private(dax_dev); in dm_dax_supported()
1159 struct mapped_device *md = dax_get_private(dax_dev); in dm_dax_copy_from_iter()
1183 struct mapped_device *md = dax_get_private(dax_dev); in dm_dax_copy_to_iter()
1207 struct mapped_device *md = dax_get_private(dax_dev); in dm_dax_zero_page_range()
1276 static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch) in __set_swap_bios_limit()
1312 struct mapped_device *md = io->md; in __map_bio()
1331 struct mapped_device *md = io->md; in __map_bio()
1339 struct mapped_device *md = io->md; in __map_bio()
1614 static void init_clone_info(struct clone_info *ci, struct mapped_device *md, in init_clone_info()
1628 static blk_qc_t __split_and_process_bio(struct mapped_device *md, in __split_and_process_bio()
1689 struct mapped_device *md = bio->bi_disk->private_data; in dm_submit_bio()
1781 static void cleanup_mapped_device(struct mapped_device *md) in cleanup_mapped_device()
1823 static struct mapped_device *alloc_dev(int minor) in alloc_dev()
1826 struct mapped_device *md; in alloc_dev()
1936 static void unlock_fs(struct mapped_device *md);
1938 static void free_dev(struct mapped_device *md) in free_dev()
1954 static int __bind_mempools(struct mapped_device *md, struct dm_table *t) in __bind_mempools()
2003 struct mapped_device *md = (struct mapped_device *) context; in event_callback()
2019 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, in __bind()
2072 static struct dm_table *__unbind(struct mapped_device *md) in __unbind()
2089 int dm_create(int minor, struct mapped_device **result) in dm_create()
2092 struct mapped_device *md; in dm_create()
2112 void dm_lock_md_type(struct mapped_device *md) in dm_lock_md_type()
2117 void dm_unlock_md_type(struct mapped_device *md) in dm_unlock_md_type()
2122 void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type) in dm_set_md_type()
2128 enum dm_queue_mode dm_get_md_type(struct mapped_device *md) in dm_get_md_type()
2133 struct target_type *dm_get_immutable_target_type(struct mapped_device *md) in dm_get_immutable_target_type()
2142 struct queue_limits *dm_get_queue_limits(struct mapped_device *md) in dm_get_queue_limits()
2152 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) in dm_setup_md_queue()
2186 struct mapped_device *dm_get_md(dev_t dev) in dm_get_md()
2188 struct mapped_device *md; in dm_get_md()
2210 void *dm_get_mdptr(struct mapped_device *md) in dm_get_mdptr()
2215 void dm_set_mdptr(struct mapped_device *md, void *ptr) in dm_set_mdptr()
2220 void dm_get(struct mapped_device *md) in dm_get()
2226 int dm_hold(struct mapped_device *md) in dm_hold()
2239 const char *dm_device_name(struct mapped_device *md) in dm_device_name()
2245 static void __dm_destroy(struct mapped_device *md, bool wait) in __dm_destroy()
2293 void dm_destroy(struct mapped_device *md) in dm_destroy()
2298 void dm_destroy_immediate(struct mapped_device *md) in dm_destroy_immediate()
2303 void dm_put(struct mapped_device *md) in dm_put()
2309 static bool md_in_flight_bios(struct mapped_device *md) in md_in_flight_bios()
2323 static int dm_wait_for_bios_completion(struct mapped_device *md, long task_state) in dm_wait_for_bios_completion()
2346 static int dm_wait_for_completion(struct mapped_device *md, long task_state) in dm_wait_for_completion()
2373 struct mapped_device *md = container_of(work, struct mapped_device, work); in dm_wq_work()
2388 static void dm_queue_flush(struct mapped_device *md) in dm_queue_flush()
2398 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) in dm_swap_table()
2443 static int lock_fs(struct mapped_device *md) in lock_fs()
2461 static void unlock_fs(struct mapped_device *md) in unlock_fs()
2480 static int __dm_suspend(struct mapped_device *md, struct dm_table *map, in __dm_suspend()
2588 int dm_suspend(struct mapped_device *md, unsigned suspend_flags) in dm_suspend()
2625 static int __dm_resume(struct mapped_device *md, struct dm_table *map) in __dm_resume()
2648 int dm_resume(struct mapped_device *md) in dm_resume()
2690 static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags) in __dm_internal_suspend()
2720 static void __dm_internal_resume(struct mapped_device *md) in __dm_internal_resume()
2742 void dm_internal_suspend_noflush(struct mapped_device *md) in dm_internal_suspend_noflush()
2750 void dm_internal_resume(struct mapped_device *md) in dm_internal_resume()
2763 void dm_internal_suspend_fast(struct mapped_device *md) in dm_internal_suspend_fast()
2776 void dm_internal_resume_fast(struct mapped_device *md) in dm_internal_resume_fast()
2791 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, in dm_kobject_uevent()
2815 uint32_t dm_next_uevent_seq(struct mapped_device *md) in dm_next_uevent_seq()
2820 uint32_t dm_get_event_nr(struct mapped_device *md) in dm_get_event_nr()
2825 int dm_wait_event(struct mapped_device *md, int event_nr) in dm_wait_event()
2831 void dm_uevent_add(struct mapped_device *md, struct list_head *elist) in dm_uevent_add()
2844 struct gendisk *dm_disk(struct mapped_device *md) in dm_disk()
2850 struct kobject *dm_kobject(struct mapped_device *md) in dm_kobject()
2855 struct mapped_device *dm_get_from_kobject(struct kobject *kobj) in dm_get_from_kobject()
2857 struct mapped_device *md; in dm_get_from_kobject()
2859 md = container_of(kobj, struct mapped_device, kobj_holder.kobj); in dm_get_from_kobject()
2873 int dm_suspended_md(struct mapped_device *md) in dm_suspended_md()
2878 static int dm_post_suspending_md(struct mapped_device *md) in dm_post_suspending_md()
2883 int dm_suspended_internally_md(struct mapped_device *md) in dm_suspended_internally_md()
2888 int dm_test_deferred_remove_flag(struct mapped_device *md) in dm_test_deferred_remove_flag()
2911 struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type, in dm_alloc_md_mempools()
2980 struct mapped_device *md = bdev->bd_disk->private_data; in dm_call_pr()
3045 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_reserve()
3065 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_release()
3086 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_preempt()
3106 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_clear()
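
Most of the entries above are variations on one pattern: pin the live table with dm_get_live_table(), check the device state, do the work, and release the table with dm_put_live_table() on every exit path. The following is a minimal sketch of that pattern, written as if it were a helper inside drivers/md/dm.c; the helper name dm_example_live_table_size() is hypothetical, and it assumes only the signatures shown in the listing plus dm_table_get_size() from include/linux/device-mapper.h. It mirrors the shape of dm_prepare_ioctl() and the dm_dax_* callbacks rather than any specific function in the file.

/*
 * Sketch: return the size of the current live table, or 0 if the
 * device has no table loaded or is suspended. Hypothetical helper,
 * shown only to illustrate the dm_get_live_table()/dm_put_live_table()
 * pairing used throughout the listing above.
 */
static sector_t dm_example_live_table_size(struct mapped_device *md)
{
	struct dm_table *map;
	sector_t size = 0;
	int srcu_idx;

	/* Pins the live table under md->io_barrier (SRCU read lock). */
	map = dm_get_live_table(md, &srcu_idx);
	if (!map)
		goto out;

	/* New work must not be started against a suspended device. */
	if (dm_suspended_md(md))
		goto out;

	size = dm_table_get_size(map);
out:
	/*
	 * dm_get_live_table() takes the SRCU read lock even when no table
	 * is loaded, so the put must run on every path, including errors.
	 */
	dm_put_live_table(md, srcu_idx);
	return size;
}

The srcu_idx value filled in by dm_get_live_table() is the SRCU read-side cookie, and dm_put_live_table() must always receive it back; that is why the sketch funnels every exit through a single out label, the same structure visible in dm_prepare_ioctl()/dm_unprepare_ioctl() and dm_dax_direct_access() in the listing.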