Lines Matching full:lo in drivers/block/loop.c (Linux loop block driver)

102  * @lo: struct loop_device
103 * @global: true if @lo is about to bind another "struct loop_device", false otherwise
111 static int loop_global_lock_killable(struct loop_device *lo, bool global) in loop_global_lock_killable() argument
120 err = mutex_lock_killable(&lo->lo_mutex); in loop_global_lock_killable()
129 * @lo: struct loop_device
130 * @global: true if @lo was about to bind another "struct loop_device", false otherwise
132 static void loop_global_unlock(struct loop_device *lo, bool global) in loop_global_unlock() argument
134 mutex_unlock(&lo->lo_mutex); in loop_global_unlock()
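
Note: the two helpers above always travel as a pair. A minimal caller-side sketch, not part of the listing (the function name and body are hypothetical; the pairing follows what loop_change_fd() and loop_configure() do further down):

static int example_bind_section(struct loop_device *lo, bool is_loop)
{
	/* Take the global lock (only when is_loop) plus lo->lo_mutex; killable, so it can fail. */
	int err = loop_global_lock_killable(lo, is_loop);

	if (err)
		return err;	/* interrupted by a fatal signal */

	/* ... inspect and update *lo while the locks are held ... */

	loop_global_unlock(lo, is_loop);	/* drops the locks in reverse order */
	return 0;
}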
163 static loff_t get_loop_size(struct loop_device *lo, struct file *file) in get_loop_size() argument
165 return get_size(lo->lo_offset, lo->lo_sizelimit, file); in get_loop_size()
173 static bool lo_bdev_can_use_dio(struct loop_device *lo, in lo_bdev_can_use_dio() argument
178 if (queue_logical_block_size(lo->lo_queue) < sb_bsize) in lo_bdev_can_use_dio()
180 if (lo->lo_offset & (sb_bsize - 1)) in lo_bdev_can_use_dio()
185 static void __loop_update_dio(struct loop_device *lo, bool dio) in __loop_update_dio() argument
187 struct file *file = lo->lo_backing_file; in __loop_update_dio()
198 (!backing_bdev || lo_bdev_can_use_dio(lo, backing_bdev)); in __loop_update_dio()
200 if (lo->use_dio == use_dio) in __loop_update_dio()
211 if (lo->lo_state == Lo_bound) in __loop_update_dio()
212 blk_mq_freeze_queue(lo->lo_queue); in __loop_update_dio()
213 lo->use_dio = use_dio; in __loop_update_dio()
215 blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, lo->lo_queue); in __loop_update_dio()
216 lo->lo_flags |= LO_FLAGS_DIRECT_IO; in __loop_update_dio()
218 blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue); in __loop_update_dio()
219 lo->lo_flags &= ~LO_FLAGS_DIRECT_IO; in __loop_update_dio()
221 if (lo->lo_state == Lo_bound) in __loop_update_dio()
222 blk_mq_unfreeze_queue(lo->lo_queue); in __loop_update_dio()
227 * @lo: struct loop_device to set the size for
233 static void loop_set_size(struct loop_device *lo, loff_t size) in loop_set_size() argument
235 if (!set_capacity_and_notify(lo->lo_disk, size)) in loop_set_size()
236 kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE); in loop_set_size()
261 static int lo_write_simple(struct loop_device *lo, struct request *rq, in lo_write_simple() argument
269 ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos); in lo_write_simple()
278 static int lo_read_simple(struct loop_device *lo, struct request *rq, in lo_read_simple() argument
288 len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0); in lo_read_simple()
307 static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos, in lo_fallocate() argument
314 struct file *file = lo->lo_backing_file; in lo_fallocate()
319 if (!bdev_max_discard_sectors(lo->lo_device)) in lo_fallocate()
328 static int lo_req_flush(struct loop_device *lo, struct request *rq) in lo_req_flush() argument
330 int ret = vfs_fsync(lo->lo_backing_file, 0); in lo_req_flush()
392 static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd, in lo_rw_aio() argument
400 struct file *file = lo->lo_backing_file; in lo_rw_aio()
461 static int do_req_filebacked(struct loop_device *lo, struct request *rq) in do_req_filebacked() argument
464 loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset; in do_req_filebacked()
477 return lo_req_flush(lo, rq); in do_req_filebacked()
483 return lo_fallocate(lo, rq, pos, in do_req_filebacked()
488 return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE); in do_req_filebacked()
491 return lo_rw_aio(lo, cmd, pos, ITER_SOURCE); in do_req_filebacked()
493 return lo_write_simple(lo, rq, pos); in do_req_filebacked()
496 return lo_rw_aio(lo, cmd, pos, ITER_DEST); in do_req_filebacked()
498 return lo_read_simple(lo, rq, pos); in do_req_filebacked()
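
Note: the seven dispatch lines above come from a single switch on the request operation in do_req_filebacked(). A hedged reconstruction of that skeleton, shown only to make the pattern readable (branch conditions such as REQ_NOUNMAP and cmd->use_aio follow the mainline driver; flag details may differ):

	switch (req_op(rq)) {
	case REQ_OP_FLUSH:
		return lo_req_flush(lo, rq);
	case REQ_OP_WRITE_ZEROES:
		/* Zero the range in place, or punch it out if unmapping is allowed. */
		return lo_fallocate(lo, rq, pos,
				    (rq->cmd_flags & REQ_NOUNMAP) ?
					FALLOC_FL_ZERO_RANGE : FALLOC_FL_PUNCH_HOLE);
	case REQ_OP_DISCARD:
		return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE);
	case REQ_OP_WRITE:
		return cmd->use_aio ? lo_rw_aio(lo, cmd, pos, ITER_SOURCE) :
				      lo_write_simple(lo, rq, pos);
	case REQ_OP_READ:
		return cmd->use_aio ? lo_rw_aio(lo, cmd, pos, ITER_DEST) :
				      lo_read_simple(lo, rq, pos);
	default:
		return -EIO;	/* unsupported operation */
	}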
505 static inline void loop_update_dio(struct loop_device *lo) in loop_update_dio() argument
507 __loop_update_dio(lo, (lo->lo_backing_file->f_flags & O_DIRECT) | in loop_update_dio()
508 lo->use_dio); in loop_update_dio()
511 static void loop_reread_partitions(struct loop_device *lo) in loop_reread_partitions() argument
515 mutex_lock(&lo->lo_disk->open_mutex); in loop_reread_partitions()
516 rc = bdev_disk_changed(lo->lo_disk, false); in loop_reread_partitions()
517 mutex_unlock(&lo->lo_disk->open_mutex); in loop_reread_partitions()
520 __func__, lo->lo_number, lo->lo_file_name, rc); in loop_reread_partitions()
546 /* Order wrt setting lo->lo_backing_file in loop_configure(). */ in loop_validate_file()
563 static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, in loop_change_fd() argument
576 dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1); in loop_change_fd()
579 error = loop_global_lock_killable(lo, is_loop); in loop_change_fd()
583 if (lo->lo_state != Lo_bound) in loop_change_fd()
588 if (!(lo->lo_flags & LO_FLAGS_READ_ONLY)) in loop_change_fd()
595 old_file = lo->lo_backing_file; in loop_change_fd()
600 if (get_loop_size(lo, file) != get_loop_size(lo, old_file)) in loop_change_fd()
604 disk_force_media_change(lo->lo_disk); in loop_change_fd()
605 blk_mq_freeze_queue(lo->lo_queue); in loop_change_fd()
606 mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask); in loop_change_fd()
607 lo->lo_backing_file = file; in loop_change_fd()
608 lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping); in loop_change_fd()
610 lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); in loop_change_fd()
611 loop_update_dio(lo); in loop_change_fd()
612 blk_mq_unfreeze_queue(lo->lo_queue); in loop_change_fd()
613 partscan = lo->lo_flags & LO_FLAGS_PARTSCAN; in loop_change_fd()
614 loop_global_unlock(lo, is_loop); in loop_change_fd()
631 loop_reread_partitions(lo); in loop_change_fd()
636 dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0); in loop_change_fd()
640 loop_global_unlock(lo, is_loop); in loop_change_fd()
652 struct loop_device *lo = disk->private_data; in loop_attr_show() local
654 return callback(lo, page); in loop_attr_show()
667 static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf) in loop_attr_backing_file_show() argument
672 spin_lock_irq(&lo->lo_lock); in loop_attr_backing_file_show()
673 if (lo->lo_backing_file) in loop_attr_backing_file_show()
674 p = file_path(lo->lo_backing_file, buf, PAGE_SIZE - 1); in loop_attr_backing_file_show()
675 spin_unlock_irq(&lo->lo_lock); in loop_attr_backing_file_show()
689 static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf) in loop_attr_offset_show() argument
691 return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_offset); in loop_attr_offset_show()
694 static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf) in loop_attr_sizelimit_show() argument
696 return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit); in loop_attr_sizelimit_show()
699 static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf) in loop_attr_autoclear_show() argument
701 int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR); in loop_attr_autoclear_show()
706 static ssize_t loop_attr_partscan_show(struct loop_device *lo, char *buf) in loop_attr_partscan_show() argument
708 int partscan = (lo->lo_flags & LO_FLAGS_PARTSCAN); in loop_attr_partscan_show()
713 static ssize_t loop_attr_dio_show(struct loop_device *lo, char *buf) in loop_attr_dio_show() argument
715 int dio = (lo->lo_flags & LO_FLAGS_DIRECT_IO); in loop_attr_dio_show()
742 static void loop_sysfs_init(struct loop_device *lo) in loop_sysfs_init() argument
744 lo->sysfs_inited = !sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj, in loop_sysfs_init()
748 static void loop_sysfs_exit(struct loop_device *lo) in loop_sysfs_exit() argument
750 if (lo->sysfs_inited) in loop_sysfs_exit()
751 sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj, in loop_sysfs_exit()
755 static void loop_config_discard(struct loop_device *lo) in loop_config_discard() argument
757 struct file *file = lo->lo_backing_file; in loop_config_discard()
759 struct request_queue *q = lo->lo_queue; in loop_config_discard()
810 struct loop_device *lo; member
829 static void loop_queue_work(struct loop_device *lo, struct loop_cmd *cmd) in loop_queue_work() argument
836 spin_lock_irq(&lo->lo_work_lock); in loop_queue_work()
841 node = &lo->worker_tree.rb_node; in loop_queue_work()
876 worker->lo = lo; in loop_queue_work()
878 rb_insert_color(&worker->rb_node, &lo->worker_tree); in loop_queue_work()
891 work = &lo->rootcg_work; in loop_queue_work()
892 cmd_list = &lo->rootcg_cmd_list; in loop_queue_work()
895 queue_work(lo->workqueue, work); in loop_queue_work()
896 spin_unlock_irq(&lo->lo_work_lock); in loop_queue_work()
899 static void loop_set_timer(struct loop_device *lo) in loop_set_timer() argument
901 timer_reduce(&lo->timer, jiffies + LOOP_IDLE_WORKER_TIMEOUT); in loop_set_timer()
904 static void loop_free_idle_workers(struct loop_device *lo, bool delete_all) in loop_free_idle_workers() argument
908 spin_lock_irq(&lo->lo_work_lock); in loop_free_idle_workers()
909 list_for_each_entry_safe(worker, pos, &lo->idle_worker_list, in loop_free_idle_workers()
916 rb_erase(&worker->rb_node, &lo->worker_tree); in loop_free_idle_workers()
920 if (!list_empty(&lo->idle_worker_list)) in loop_free_idle_workers()
921 loop_set_timer(lo); in loop_free_idle_workers()
922 spin_unlock_irq(&lo->lo_work_lock); in loop_free_idle_workers()
927 struct loop_device *lo = container_of(timer, struct loop_device, timer); in loop_free_idle_workers_timer() local
929 return loop_free_idle_workers(lo, false); in loop_free_idle_workers_timer()
932 static void loop_update_rotational(struct loop_device *lo) in loop_update_rotational() argument
934 struct file *file = lo->lo_backing_file; in loop_update_rotational()
937 struct request_queue *q = lo->lo_queue; in loop_update_rotational()
952 * @lo: struct loop_device to configure
959 loop_set_status_from_info(struct loop_device *lo, in loop_set_status_from_info() argument
982 lo->lo_offset = info->lo_offset; in loop_set_status_from_info()
983 lo->lo_sizelimit = info->lo_sizelimit; in loop_set_status_from_info()
985 memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE); in loop_set_status_from_info()
986 lo->lo_file_name[LO_NAME_SIZE-1] = 0; in loop_set_status_from_info()
987 lo->lo_flags = info->lo_flags; in loop_set_status_from_info()
991 static int loop_configure(struct loop_device *lo, blk_mode_t mode, in loop_configure() argument
1021 error = loop_global_lock_killable(lo, is_loop); in loop_configure()
1026 if (lo->lo_state != Lo_unbound) in loop_configure()
1047 error = loop_set_status_from_info(lo, &config->info); in loop_configure()
1053 lo->lo_flags |= LO_FLAGS_READ_ONLY; in loop_configure()
1055 if (!lo->workqueue) { in loop_configure()
1056 lo->workqueue = alloc_workqueue("loop%d", in loop_configure()
1058 0, lo->lo_number); in loop_configure()
1059 if (!lo->workqueue) { in loop_configure()
1066 dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1); in loop_configure()
1068 disk_force_media_change(lo->lo_disk); in loop_configure()
1069 set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0); in loop_configure()
1071 lo->use_dio = lo->lo_flags & LO_FLAGS_DIRECT_IO; in loop_configure()
1072 lo->lo_device = bdev; in loop_configure()
1073 lo->lo_backing_file = file; in loop_configure()
1074 lo->old_gfp_mask = mapping_gfp_mask(mapping); in loop_configure()
1075 mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); in loop_configure()
1077 if (!(lo->lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync) in loop_configure()
1078 blk_queue_write_cache(lo->lo_queue, true, false); in loop_configure()
1082 else if ((lo->lo_backing_file->f_flags & O_DIRECT) && inode->i_sb->s_bdev) in loop_configure()
1088 blk_queue_logical_block_size(lo->lo_queue, bsize); in loop_configure()
1089 blk_queue_physical_block_size(lo->lo_queue, bsize); in loop_configure()
1090 blk_queue_io_min(lo->lo_queue, bsize); in loop_configure()
1092 loop_config_discard(lo); in loop_configure()
1093 loop_update_rotational(lo); in loop_configure()
1094 loop_update_dio(lo); in loop_configure()
1095 loop_sysfs_init(lo); in loop_configure()
1097 size = get_loop_size(lo, file); in loop_configure()
1098 loop_set_size(lo, size); in loop_configure()
1103 lo->lo_state = Lo_bound; in loop_configure()
1105 lo->lo_flags |= LO_FLAGS_PARTSCAN; in loop_configure()
1106 partscan = lo->lo_flags & LO_FLAGS_PARTSCAN; in loop_configure()
1108 clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state); in loop_configure()
1111 dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0); in loop_configure()
1113 loop_global_unlock(lo, is_loop); in loop_configure()
1115 loop_reread_partitions(lo); in loop_configure()
1123 loop_global_unlock(lo, is_loop); in loop_configure()
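
Note: loop_configure() is the kernel half of the LOOP_CONFIGURE ioctl (see lo_ioctl() below). A minimal user-space caller, assuming headers new enough to provide LOOP_CONFIGURE (Linux 5.8+); /dev/loop0 and /tmp/backing.img are placeholder paths:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/loop.h>

int main(void)
{
	int loop_fd = open("/dev/loop0", O_RDWR);
	int file_fd = open("/tmp/backing.img", O_RDWR);
	struct loop_config cfg;

	if (loop_fd < 0 || file_fd < 0) {
		perror("open");
		return 1;
	}

	memset(&cfg, 0, sizeof(cfg));
	cfg.fd = file_fd;			/* backing file to bind */
	cfg.block_size = 4096;			/* logical block size for the loop queue */
	cfg.info.lo_flags = LO_FLAGS_PARTSCAN;	/* scan for partitions after binding */

	if (ioctl(loop_fd, LOOP_CONFIGURE, &cfg) < 0) {
		perror("LOOP_CONFIGURE");
		return 1;
	}

	close(file_fd);
	close(loop_fd);
	return 0;
}

Detaching later goes through loop_clr_fd(), reached via ioctl(loop_fd, LOOP_CLR_FD, 0).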
1134 static void __loop_clr_fd(struct loop_device *lo, bool release) in __loop_clr_fd() argument
1137 gfp_t gfp = lo->old_gfp_mask; in __loop_clr_fd()
1139 if (test_bit(QUEUE_FLAG_WC, &lo->lo_queue->queue_flags)) in __loop_clr_fd()
1140 blk_queue_write_cache(lo->lo_queue, false, false); in __loop_clr_fd()
1148 blk_mq_freeze_queue(lo->lo_queue); in __loop_clr_fd()
1150 spin_lock_irq(&lo->lo_lock); in __loop_clr_fd()
1151 filp = lo->lo_backing_file; in __loop_clr_fd()
1152 lo->lo_backing_file = NULL; in __loop_clr_fd()
1153 spin_unlock_irq(&lo->lo_lock); in __loop_clr_fd()
1155 lo->lo_device = NULL; in __loop_clr_fd()
1156 lo->lo_offset = 0; in __loop_clr_fd()
1157 lo->lo_sizelimit = 0; in __loop_clr_fd()
1158 memset(lo->lo_file_name, 0, LO_NAME_SIZE); in __loop_clr_fd()
1159 blk_queue_logical_block_size(lo->lo_queue, 512); in __loop_clr_fd()
1160 blk_queue_physical_block_size(lo->lo_queue, 512); in __loop_clr_fd()
1161 blk_queue_io_min(lo->lo_queue, 512); in __loop_clr_fd()
1162 invalidate_disk(lo->lo_disk); in __loop_clr_fd()
1163 loop_sysfs_exit(lo); in __loop_clr_fd()
1165 kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE); in __loop_clr_fd()
1170 blk_mq_unfreeze_queue(lo->lo_queue); in __loop_clr_fd()
1172 disk_force_media_change(lo->lo_disk); in __loop_clr_fd()
1174 if (lo->lo_flags & LO_FLAGS_PARTSCAN) { in __loop_clr_fd()
1186 mutex_lock(&lo->lo_disk->open_mutex); in __loop_clr_fd()
1187 err = bdev_disk_changed(lo->lo_disk, false); in __loop_clr_fd()
1189 mutex_unlock(&lo->lo_disk->open_mutex); in __loop_clr_fd()
1192 __func__, lo->lo_number, err); in __loop_clr_fd()
1197 * lo->lo_state is set to Lo_unbound here after above partscan has in __loop_clr_fd()
1200 * change the 'lo' device. in __loop_clr_fd()
1202 lo->lo_flags = 0; in __loop_clr_fd()
1204 set_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state); in __loop_clr_fd()
1205 mutex_lock(&lo->lo_mutex); in __loop_clr_fd()
1206 lo->lo_state = Lo_unbound; in __loop_clr_fd()
1207 mutex_unlock(&lo->lo_mutex); in __loop_clr_fd()
1217 static int loop_clr_fd(struct loop_device *lo) in loop_clr_fd() argument
1230 err = loop_global_lock_killable(lo, true); in loop_clr_fd()
1233 if (lo->lo_state != Lo_bound) { in loop_clr_fd()
1234 loop_global_unlock(lo, true); in loop_clr_fd()
1247 if (disk_openers(lo->lo_disk) > 1) { in loop_clr_fd()
1248 lo->lo_flags |= LO_FLAGS_AUTOCLEAR; in loop_clr_fd()
1249 loop_global_unlock(lo, true); in loop_clr_fd()
1252 lo->lo_state = Lo_rundown; in loop_clr_fd()
1253 loop_global_unlock(lo, true); in loop_clr_fd()
1255 __loop_clr_fd(lo, false); in loop_clr_fd()
1260 loop_set_status(struct loop_device *lo, const struct loop_info64 *info) in loop_set_status() argument
1267 err = mutex_lock_killable(&lo->lo_mutex); in loop_set_status()
1270 if (lo->lo_state != Lo_bound) { in loop_set_status()
1275 if (lo->lo_offset != info->lo_offset || in loop_set_status()
1276 lo->lo_sizelimit != info->lo_sizelimit) { in loop_set_status()
1278 sync_blockdev(lo->lo_device); in loop_set_status()
1279 invalidate_bdev(lo->lo_device); in loop_set_status()
1283 blk_mq_freeze_queue(lo->lo_queue); in loop_set_status()
1285 prev_lo_flags = lo->lo_flags; in loop_set_status()
1287 err = loop_set_status_from_info(lo, info); in loop_set_status()
1292 lo->lo_flags &= LOOP_SET_STATUS_SETTABLE_FLAGS; in loop_set_status()
1294 lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_SETTABLE_FLAGS; in loop_set_status()
1296 lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_CLEARABLE_FLAGS; in loop_set_status()
1299 loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit, in loop_set_status()
1300 lo->lo_backing_file); in loop_set_status()
1301 loop_set_size(lo, new_size); in loop_set_status()
1304 loop_config_discard(lo); in loop_set_status()
1307 __loop_update_dio(lo, lo->use_dio); in loop_set_status()
1310 blk_mq_unfreeze_queue(lo->lo_queue); in loop_set_status()
1312 if (!err && (lo->lo_flags & LO_FLAGS_PARTSCAN) && in loop_set_status()
1314 clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state); in loop_set_status()
1318 mutex_unlock(&lo->lo_mutex); in loop_set_status()
1320 loop_reread_partitions(lo); in loop_set_status()
1326 loop_get_status(struct loop_device *lo, struct loop_info64 *info) in loop_get_status() argument
1332 ret = mutex_lock_killable(&lo->lo_mutex); in loop_get_status()
1335 if (lo->lo_state != Lo_bound) { in loop_get_status()
1336 mutex_unlock(&lo->lo_mutex); in loop_get_status()
1341 info->lo_number = lo->lo_number; in loop_get_status()
1342 info->lo_offset = lo->lo_offset; in loop_get_status()
1343 info->lo_sizelimit = lo->lo_sizelimit; in loop_get_status()
1344 info->lo_flags = lo->lo_flags; in loop_get_status()
1345 memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE); in loop_get_status()
1348 path = lo->lo_backing_file->f_path; in loop_get_status()
1350 mutex_unlock(&lo->lo_mutex); in loop_get_status()
1398 loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg) in loop_set_status_old() argument
1406 return loop_set_status(lo, &info64); in loop_set_status_old()
1410 loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg) in loop_set_status64() argument
1416 return loop_set_status(lo, &info64); in loop_set_status64()
1420 loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) { in loop_get_status_old() argument
1427 err = loop_get_status(lo, &info64); in loop_get_status_old()
1437 loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) { in loop_get_status64() argument
1443 err = loop_get_status(lo, &info64); in loop_get_status64()
1450 static int loop_set_capacity(struct loop_device *lo) in loop_set_capacity() argument
1454 if (unlikely(lo->lo_state != Lo_bound)) in loop_set_capacity()
1457 size = get_loop_size(lo, lo->lo_backing_file); in loop_set_capacity()
1458 loop_set_size(lo, size); in loop_set_capacity()
1463 static int loop_set_dio(struct loop_device *lo, unsigned long arg) in loop_set_dio() argument
1466 if (lo->lo_state != Lo_bound) in loop_set_dio()
1469 __loop_update_dio(lo, !!arg); in loop_set_dio()
1470 if (lo->use_dio == !!arg) in loop_set_dio()
1477 static int loop_set_block_size(struct loop_device *lo, unsigned long arg) in loop_set_block_size() argument
1481 if (lo->lo_state != Lo_bound) in loop_set_block_size()
1488 if (lo->lo_queue->limits.logical_block_size == arg) in loop_set_block_size()
1491 sync_blockdev(lo->lo_device); in loop_set_block_size()
1492 invalidate_bdev(lo->lo_device); in loop_set_block_size()
1494 blk_mq_freeze_queue(lo->lo_queue); in loop_set_block_size()
1495 blk_queue_logical_block_size(lo->lo_queue, arg); in loop_set_block_size()
1496 blk_queue_physical_block_size(lo->lo_queue, arg); in loop_set_block_size()
1497 blk_queue_io_min(lo->lo_queue, arg); in loop_set_block_size()
1498 loop_update_dio(lo); in loop_set_block_size()
1499 blk_mq_unfreeze_queue(lo->lo_queue); in loop_set_block_size()
1504 static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd, in lo_simple_ioctl() argument
1509 err = mutex_lock_killable(&lo->lo_mutex); in lo_simple_ioctl()
1514 err = loop_set_capacity(lo); in lo_simple_ioctl()
1517 err = loop_set_dio(lo, arg); in lo_simple_ioctl()
1520 err = loop_set_block_size(lo, arg); in lo_simple_ioctl()
1525 mutex_unlock(&lo->lo_mutex); in lo_simple_ioctl()
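
Note: lo_simple_ioctl() is where LOOP_SET_CAPACITY, LOOP_SET_DIRECT_IO and LOOP_SET_BLOCK_SIZE land. A small user-space sketch exercising the latter two on an already-bound device (placeholder device path; both ioctls are defined in <linux/loop.h>):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/loop.h>

int main(void)
{
	int fd = open("/dev/loop0", O_RDWR);	/* placeholder path */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Ask for direct I/O against the backing file (the kernel may refuse). */
	if (ioctl(fd, LOOP_SET_DIRECT_IO, 1UL) < 0)
		perror("LOOP_SET_DIRECT_IO");

	/* Switch the logical block size; this triggers the freeze/update/unfreeze
	 * sequence shown in loop_set_block_size() above. */
	if (ioctl(fd, LOOP_SET_BLOCK_SIZE, 4096UL) < 0)
		perror("LOOP_SET_BLOCK_SIZE");

	close(fd);
	return 0;
}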
1532 struct loop_device *lo = bdev->bd_disk->private_data; in lo_ioctl() local
1548 return loop_configure(lo, mode, bdev, &config); in lo_ioctl()
1556 return loop_configure(lo, mode, bdev, &config); in lo_ioctl()
1559 return loop_change_fd(lo, bdev, arg); in lo_ioctl()
1561 return loop_clr_fd(lo); in lo_ioctl()
1565 err = loop_set_status_old(lo, argp); in lo_ioctl()
1568 return loop_get_status_old(lo, argp); in lo_ioctl()
1572 err = loop_set_status64(lo, argp); in lo_ioctl()
1575 return loop_get_status64(lo, argp); in lo_ioctl()
1583 err = lo_simple_ioctl(lo, cmd, arg); in lo_ioctl()
1663 loop_set_status_compat(struct loop_device *lo, in loop_set_status_compat() argument
1672 return loop_set_status(lo, &info64); in loop_set_status_compat()
1676 loop_get_status_compat(struct loop_device *lo, in loop_get_status_compat() argument
1684 err = loop_get_status(lo, &info64); in loop_get_status_compat()
1693 struct loop_device *lo = bdev->bd_disk->private_data; in lo_compat_ioctl() local
1698 err = loop_set_status_compat(lo, in lo_compat_ioctl()
1702 err = loop_get_status_compat(lo, in lo_compat_ioctl()
1728 struct loop_device *lo = disk->private_data; in lo_release() local
1733 mutex_lock(&lo->lo_mutex); in lo_release()
1734 if (lo->lo_state == Lo_bound && (lo->lo_flags & LO_FLAGS_AUTOCLEAR)) { in lo_release()
1735 lo->lo_state = Lo_rundown; in lo_release()
1736 mutex_unlock(&lo->lo_mutex); in lo_release()
1741 __loop_clr_fd(lo, true); in lo_release()
1744 mutex_unlock(&lo->lo_mutex); in lo_release()
1749 struct loop_device *lo = disk->private_data; in lo_free_disk() local
1751 if (lo->workqueue) in lo_free_disk()
1752 destroy_workqueue(lo->workqueue); in lo_free_disk()
1753 loop_free_idle_workers(lo, true); in lo_free_disk()
1754 timer_shutdown_sync(&lo->timer); in lo_free_disk()
1755 mutex_destroy(&lo->lo_mutex); in lo_free_disk()
1756 kfree(lo); in lo_free_disk()
1847 struct loop_device *lo = rq->q->queuedata; in loop_queue_rq() local
1851 if (lo->lo_state != Lo_bound) in loop_queue_rq()
1861 cmd->use_aio = lo->use_dio; in loop_queue_rq()
1880 loop_queue_work(lo, cmd); in loop_queue_rq()
1891 struct loop_device *lo = rq->q->queuedata; in loop_handle_cmd() local
1896 if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) { in loop_handle_cmd()
1913 ret = do_req_filebacked(lo, rq); in loop_handle_cmd()
1935 struct list_head *cmd_list, struct loop_device *lo) in loop_process_work() argument
1941 spin_lock_irq(&lo->lo_work_lock); in loop_process_work()
1946 spin_unlock_irq(&lo->lo_work_lock); in loop_process_work()
1951 spin_lock_irq(&lo->lo_work_lock); in loop_process_work()
1961 list_add_tail(&worker->idle_list, &lo->idle_worker_list); in loop_process_work()
1962 loop_set_timer(lo); in loop_process_work()
1964 spin_unlock_irq(&lo->lo_work_lock); in loop_process_work()
1972 loop_process_work(worker, &worker->cmd_list, worker->lo); in loop_workfn()
1977 struct loop_device *lo = in loop_rootcg_workfn() local
1979 loop_process_work(NULL, &lo->rootcg_cmd_list, lo); in loop_rootcg_workfn()
1989 struct loop_device *lo; in loop_add() local
1994 lo = kzalloc(sizeof(*lo), GFP_KERNEL); in loop_add()
1995 if (!lo) in loop_add()
1997 lo->worker_tree = RB_ROOT; in loop_add()
1998 INIT_LIST_HEAD(&lo->idle_worker_list); in loop_add()
1999 timer_setup(&lo->timer, loop_free_idle_workers_timer, TIMER_DEFERRABLE); in loop_add()
2000 lo->lo_state = Lo_unbound; in loop_add()
2008 err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL); in loop_add()
2012 err = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL); in loop_add()
2019 lo->tag_set.ops = &loop_mq_ops; in loop_add()
2020 lo->tag_set.nr_hw_queues = 1; in loop_add()
2021 lo->tag_set.queue_depth = hw_queue_depth; in loop_add()
2022 lo->tag_set.numa_node = NUMA_NO_NODE; in loop_add()
2023 lo->tag_set.cmd_size = sizeof(struct loop_cmd); in loop_add()
2024 lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING | in loop_add()
2026 lo->tag_set.driver_data = lo; in loop_add()
2028 err = blk_mq_alloc_tag_set(&lo->tag_set); in loop_add()
2032 disk = lo->lo_disk = blk_mq_alloc_disk(&lo->tag_set, lo); in loop_add()
2037 lo->lo_queue = lo->lo_disk->queue; in loop_add()
2039 blk_queue_max_hw_sectors(lo->lo_queue, BLK_DEF_MAX_SECTORS); in loop_add()
2047 blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue); in loop_add()
2069 mutex_init(&lo->lo_mutex); in loop_add()
2070 lo->lo_number = i; in loop_add()
2071 spin_lock_init(&lo->lo_lock); in loop_add()
2072 spin_lock_init(&lo->lo_work_lock); in loop_add()
2073 INIT_WORK(&lo->rootcg_work, loop_rootcg_workfn); in loop_add()
2074 INIT_LIST_HEAD(&lo->rootcg_cmd_list); in loop_add()
2079 disk->private_data = lo; in loop_add()
2080 disk->queue = lo->lo_queue; in loop_add()
2091 lo->idr_visible = true; in loop_add()
2099 blk_mq_free_tag_set(&lo->tag_set); in loop_add()
2105 kfree(lo); in loop_add()
2110 static void loop_remove(struct loop_device *lo) in loop_remove() argument
2113 del_gendisk(lo->lo_disk); in loop_remove()
2114 blk_mq_free_tag_set(&lo->tag_set); in loop_remove()
2117 idr_remove(&loop_index_idr, lo->lo_number); in loop_remove()
2120 put_disk(lo->lo_disk); in loop_remove()
2138 struct loop_device *lo; in loop_control_remove() local
2150 lo = idr_find(&loop_index_idr, idx); in loop_control_remove()
2151 if (!lo || !lo->idr_visible) in loop_control_remove()
2154 lo->idr_visible = false; in loop_control_remove()
2160 ret = mutex_lock_killable(&lo->lo_mutex); in loop_control_remove()
2163 if (lo->lo_state != Lo_unbound || disk_openers(lo->lo_disk) > 0) { in loop_control_remove()
2164 mutex_unlock(&lo->lo_mutex); in loop_control_remove()
2169 lo->lo_state = Lo_deleting; in loop_control_remove()
2170 mutex_unlock(&lo->lo_mutex); in loop_control_remove()
2172 loop_remove(lo); in loop_control_remove()
2178 lo->idr_visible = true; in loop_control_remove()
2185 struct loop_device *lo; in loop_control_get_free() local
2191 idr_for_each_entry(&loop_index_idr, lo, id) { in loop_control_get_free()
2193 if (lo->idr_visible && data_race(lo->lo_state) == Lo_unbound) in loop_control_get_free()
2290 struct loop_device *lo; in loop_exit() local
2302 idr_for_each_entry(&loop_index_idr, lo, id) in loop_exit()
2303 loop_remove(lo); in loop_exit()