Lines Matching +full:foo +full:- +full:queue

1 // SPDX-License-Identifier: GPL-2.0-only
18 #include <linux/backing-dev.h>
53 return &BDEV_I(inode)->bdev; in I_BDEV()
59 struct inode *inode = bdev->bd_inode; in bdev_write_inode()
62 spin_lock(&inode->i_lock); in bdev_write_inode()
63 while (inode->i_state & I_DIRTY) { in bdev_write_inode()
64 spin_unlock(&inode->i_lock); in bdev_write_inode()
72 spin_lock(&inode->i_lock); in bdev_write_inode()
74 spin_unlock(&inode->i_lock); in bdev_write_inode()
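
These matches appear to come from fs/block_dev.c in a v5.10-era Linux tree; each hit is shown with its line number in that file and the function containing it. The first cluster traces bdev_write_inode(), which flushes a dirty bdev inode in a lock/drop/write/retake loop. A plausible reconstruction of the whole routine, with the error-reporting branch that the search elides filled in from that era's source (an assumption; check against your tree):

static void bdev_write_inode(struct block_device *bdev)
{
        struct inode *inode = bdev->bd_inode;
        int ret;

        spin_lock(&inode->i_lock);
        while (inode->i_state & I_DIRTY) {
                /* write_inode_now() may sleep, so drop the spinlock first */
                spin_unlock(&inode->i_lock);
                ret = write_inode_now(inode, true);
                if (ret) {
                        char name[BDEVNAME_SIZE];
                        pr_warn_ratelimited("VFS: Dirty inode writeback failed "
                                            "for block device %s (err=%d).\n",
                                            bdevname(bdev, name), ret);
                }
                /* retake the lock and re-check: dirtying may have raced in */
                spin_lock(&inode->i_lock);
        }
        spin_unlock(&inode->i_lock);
}
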
80 struct address_space *mapping = bdev->bd_inode->i_mapping; in kill_bdev()
82 if (mapping->nrpages == 0 && mapping->nrexceptional == 0) in kill_bdev()
92 struct address_space *mapping = bdev->bd_inode->i_mapping; in invalidate_bdev()
94 if (mapping->nrpages) { in invalidate_bdev()
97 invalidate_mapping_pages(mapping, 0, -1); in invalidate_bdev()
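
invalidate_bdev() only drops clean, unmapped pages; the calls the search skips between lines 94 and 97 flush the buffer-head LRUs and per-CPU pagevecs first, so the pass over the mapping sees everything. A sketch of the full function as it likely reads in this tree (the cleancache tail is an assumption):

void invalidate_bdev(struct block_device *bdev)
{
        struct address_space *mapping = bdev->bd_inode->i_mapping;

        if (mapping->nrpages) {
                invalidate_bh_lrus();
                lru_add_drain_all();    /* flush per-CPU LRU-add caches */
                invalidate_mapping_pages(mapping, 0, -1);
        }
        /* for the rare corners that cache bdev data, drop cleancache too */
        cleancache_invalidate_inode(mapping);
}

Unlike kill_bdev() above it, this never discards dirty pages; invalidate_mapping_pages() silently keeps anything locked, dirty, or mapped.
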
122 claimed_bdev = bdev->bd_contains; in truncate_bdev_range()
128 truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend); in truncate_bdev_range()
138 return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping, in truncate_bdev_range()
147 loff_t size = i_size_read(bdev->bd_inode); in set_init_blocksize()
154 bdev->bd_inode->i_blkbits = blksize_bits(bsize); in set_init_blocksize()
161 return -EINVAL; in set_blocksize()
165 return -EINVAL; in set_blocksize()
168 if (bdev->bd_inode->i_blkbits != blksize_bits(size)) { in set_blocksize()
170 bdev->bd_inode->i_blkbits = blksize_bits(size); in set_blocksize()
180 if (set_blocksize(sb->s_bdev, size)) in sb_set_blocksize()
184 sb->s_blocksize = size; in sb_set_blocksize()
185 sb->s_blocksize_bits = blksize_bits(size); in sb_set_blocksize()
186 return sb->s_blocksize; in sb_set_blocksize()
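
sb_set_blocksize() shows the contract of the two matched functions: set_blocksize() returns -EINVAL for an unusable size and 0 on success, so the superblock wrapper reports failure by returning 0 (no blocksize set) and success by returning the new size. Reconstructed around the matched lines:

int sb_set_blocksize(struct super_block *sb, int size)
{
        if (set_blocksize(sb->s_bdev, size))
                return 0;
        /* past this point, size is known to be a power of two
         * between 512 and PAGE_SIZE */
        sb->s_blocksize = size;
        sb->s_blocksize_bits = blksize_bits(size);
        return sb->s_blocksize;
}
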
193 int minsize = bdev_logical_block_size(sb->s_bdev); in sb_min_blocksize()
205 bh->b_bdev = I_BDEV(inode); in blkdev_get_block()
206 bh->b_blocknr = iblock; in blkdev_get_block()
213 return file->f_mapping->host; in bdev_file_inode()
221 if (iocb->ki_flags & IOCB_DSYNC) in dio_bio_write_op()
230 struct task_struct *waiter = bio->bi_private; in blkdev_bio_end_io_simple()
232 WRITE_ONCE(bio->bi_private, NULL); in blkdev_bio_end_io_simple()
240 struct file *file = iocb->ki_filp; in __blkdev_direct_IO_simple()
243 loff_t pos = iocb->ki_pos; in __blkdev_direct_IO_simple()
250 (bdev_logical_block_size(bdev) - 1)) in __blkdev_direct_IO_simple()
251 return -EINVAL; in __blkdev_direct_IO_simple()
259 return -ENOMEM; in __blkdev_direct_IO_simple()
265 bio.bi_write_hint = iocb->ki_hint; in __blkdev_direct_IO_simple()
268 bio.bi_ioprio = iocb->ki_ioprio; in __blkdev_direct_IO_simple()
283 if (iocb->ki_flags & IOCB_NOWAIT) in __blkdev_direct_IO_simple()
285 if (iocb->ki_flags & IOCB_HIPRI) in __blkdev_direct_IO_simple()
293 if (!(iocb->ki_flags & IOCB_HIPRI) || in __blkdev_direct_IO_simple()
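
Lines 230-232 and 283-293 are the two halves of the synchronous direct-I/O handshake in the "simple" path: bio.bi_private holds the submitting task, the completion handler clears it and wakes the task, and the submitter polls or sleeps until it observes the cleared pointer. A hedged reconstruction of both halves (the bio setup between them is elided by the search):

static void blkdev_bio_end_io_simple(struct bio *bio)
{
        struct task_struct *waiter = bio->bi_private;

        /* the NULL store is what the waiter spins on; publish via WRITE_ONCE */
        WRITE_ONCE(bio->bi_private, NULL);
        blk_wake_io_task(waiter);
}

/* ...later in __blkdev_direct_IO_simple(), once the bio is populated... */
        blk_qc_t qc = submit_bio(&bio);
        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (!READ_ONCE(bio.bi_private))
                        break;
                if (!(iocb->ki_flags & IOCB_HIPRI) ||
                    !blk_poll(bdev_get_queue(bdev), qc, true))
                        blk_io_schedule();
        }
        __set_current_state(TASK_RUNNING);

With IOCB_HIPRI the task busy-polls the completion queue; otherwise it sleeps and relies on the wakeup from the end_io handler.
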
329 struct block_device *bdev = I_BDEV(kiocb->ki_filp->f_mapping->host); in blkdev_iopoll()
332 return blk_poll(q, READ_ONCE(kiocb->ki_cookie), wait); in blkdev_iopoll()
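
blkdev_iopoll() is small enough that the two matched lines are nearly the whole function; filling in the queue lookup gives this reconstruction:

static int blkdev_iopoll(struct kiocb *kiocb, bool wait)
{
        struct block_device *bdev = I_BDEV(kiocb->ki_filp->f_mapping->host);
        struct request_queue *q = bdev_get_queue(bdev);

        /* ki_cookie was stored by the submit path (the WRITE_ONCE at line 456) */
        return blk_poll(q, READ_ONCE(kiocb->ki_cookie), wait);
}
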
337 struct blkdev_dio *dio = bio->bi_private; in blkdev_bio_end_io()
338 bool should_dirty = dio->should_dirty; in blkdev_bio_end_io()
340 if (bio->bi_status && !dio->bio.bi_status) in blkdev_bio_end_io()
341 dio->bio.bi_status = bio->bi_status; in blkdev_bio_end_io()
343 if (!dio->multi_bio || atomic_dec_and_test(&dio->ref)) { in blkdev_bio_end_io()
344 if (!dio->is_sync) { in blkdev_bio_end_io()
345 struct kiocb *iocb = dio->iocb; in blkdev_bio_end_io()
348 if (likely(!dio->bio.bi_status)) { in blkdev_bio_end_io()
349 ret = dio->size; in blkdev_bio_end_io()
350 iocb->ki_pos += ret; in blkdev_bio_end_io()
352 ret = blk_status_to_errno(dio->bio.bi_status); in blkdev_bio_end_io()
355 dio->iocb->ki_complete(iocb, ret, 0); in blkdev_bio_end_io()
356 if (dio->multi_bio) in blkdev_bio_end_io()
357 bio_put(&dio->bio); in blkdev_bio_end_io()
359 struct task_struct *waiter = dio->waiter; in blkdev_bio_end_io()
361 WRITE_ONCE(dio->waiter, NULL); in blkdev_bio_end_io()
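
blkdev_bio_end_io() is the multi-bio analogue of the simple handler: once dio->multi_bio is set, dio->ref counts outstanding bios and only the last completion acts. It then either completes the async kiocb or wakes the synchronous waiter. A reconstruction of the whole handler, with the page-dirtying tail the search elides filled in from the same era (assumed, verify against your tree):

static void blkdev_bio_end_io(struct bio *bio)
{
        struct blkdev_dio *dio = bio->bi_private;
        bool should_dirty = dio->should_dirty;

        /* record the first error; later bios must not overwrite it */
        if (bio->bi_status && !dio->bio.bi_status)
                dio->bio.bi_status = bio->bi_status;

        if (!dio->multi_bio || atomic_dec_and_test(&dio->ref)) {
                if (!dio->is_sync) {
                        struct kiocb *iocb = dio->iocb;
                        ssize_t ret;

                        if (likely(!dio->bio.bi_status)) {
                                ret = dio->size;
                                iocb->ki_pos += ret;
                        } else {
                                ret = blk_status_to_errno(dio->bio.bi_status);
                        }

                        dio->iocb->ki_complete(iocb, ret, 0);
                        if (dio->multi_bio)
                                bio_put(&dio->bio);
                } else {
                        struct task_struct *waiter = dio->waiter;

                        WRITE_ONCE(dio->waiter, NULL);
                        blk_wake_io_task(waiter);
                }
        }

        if (should_dirty) {
                bio_check_pages_dirty(bio);
        } else {
                bio_release_pages(bio, false);
                bio_put(bio);
        }
}
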
377 struct file *file = iocb->ki_filp; in __blkdev_direct_IO()
383 bool is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0; in __blkdev_direct_IO()
385 loff_t pos = iocb->ki_pos; in __blkdev_direct_IO()
390 (bdev_logical_block_size(bdev) - 1)) in __blkdev_direct_IO()
391 return -EINVAL; in __blkdev_direct_IO()
396 dio->is_sync = is_sync = is_sync_kiocb(iocb); in __blkdev_direct_IO()
397 if (dio->is_sync) { in __blkdev_direct_IO()
398 dio->waiter = current; in __blkdev_direct_IO()
401 dio->iocb = iocb; in __blkdev_direct_IO()
404 dio->size = 0; in __blkdev_direct_IO()
405 dio->multi_bio = false; in __blkdev_direct_IO()
406 dio->should_dirty = is_read && iter_is_iovec(iter); in __blkdev_direct_IO()
417 bio->bi_iter.bi_sector = pos >> 9; in __blkdev_direct_IO()
418 bio->bi_write_hint = iocb->ki_hint; in __blkdev_direct_IO()
419 bio->bi_private = dio; in __blkdev_direct_IO()
420 bio->bi_end_io = blkdev_bio_end_io; in __blkdev_direct_IO()
421 bio->bi_ioprio = iocb->ki_ioprio; in __blkdev_direct_IO()
425 bio->bi_status = BLK_STS_IOERR; in __blkdev_direct_IO()
431 bio->bi_opf = REQ_OP_READ; in __blkdev_direct_IO()
432 if (dio->should_dirty) in __blkdev_direct_IO()
435 bio->bi_opf = dio_bio_write_op(iocb); in __blkdev_direct_IO()
436 task_io_account_write(bio->bi_iter.bi_size); in __blkdev_direct_IO()
438 if (iocb->ki_flags & IOCB_NOWAIT) in __blkdev_direct_IO()
439 bio->bi_opf |= REQ_NOWAIT; in __blkdev_direct_IO()
441 dio->size += bio->bi_iter.bi_size; in __blkdev_direct_IO()
442 pos += bio->bi_iter.bi_size; in __blkdev_direct_IO()
448 if (iocb->ki_flags & IOCB_HIPRI) { in __blkdev_direct_IO()
456 WRITE_ONCE(iocb->ki_cookie, qc); in __blkdev_direct_IO()
460 if (!dio->multi_bio) { in __blkdev_direct_IO()
468 dio->multi_bio = true; in __blkdev_direct_IO()
469 atomic_set(&dio->ref, 2); in __blkdev_direct_IO()
471 atomic_inc(&dio->ref); in __blkdev_direct_IO()
482 return -EIOCBQUEUED; in __blkdev_direct_IO()
486 if (!READ_ONCE(dio->waiter)) in __blkdev_direct_IO()
489 if (!(iocb->ki_flags & IOCB_HIPRI) || in __blkdev_direct_IO()
496 ret = blk_status_to_errno(dio->bio.bi_status); in __blkdev_direct_IO()
498 ret = dio->size; in __blkdev_direct_IO()
500 bio_put(&dio->bio); in __blkdev_direct_IO()
529 return filemap_flush(bdev->bd_inode->i_mapping); in __sync_blockdev()
530 return filemap_write_and_wait(bdev->bd_inode->i_mapping); in __sync_blockdev()
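
The two matched lines are the tail of __sync_blockdev(): wait selects between starting writeback and waiting for it to finish. The full helper likely reads (the NULL check is filled in as an assumption):

int __sync_blockdev(struct block_device *bdev, int wait)
{
        if (!bdev)
                return 0;
        if (!wait)
                return filemap_flush(bdev->bd_inode->i_mapping);
        return filemap_write_and_wait(bdev->bd_inode->i_mapping);
}
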
561 * freeze_bdev -- lock a filesystem and force it into a consistent state
577 mutex_lock(&bdev->bd_fsfreeze_mutex); in freeze_bdev()
578 if (++bdev->bd_fsfreeze_count > 1) { in freeze_bdev()
580 * We don't even need to grab a reference - the first call in freeze_bdev()
587 mutex_unlock(&bdev->bd_fsfreeze_mutex); in freeze_bdev()
594 if (sb->s_op->freeze_super) in freeze_bdev()
595 error = sb->s_op->freeze_super(sb); in freeze_bdev()
600 bdev->bd_fsfreeze_count--; in freeze_bdev()
601 mutex_unlock(&bdev->bd_fsfreeze_mutex); in freeze_bdev()
607 mutex_unlock(&bdev->bd_fsfreeze_mutex); in freeze_bdev()
608 return sb; /* thaw_bdev releases s->s_umount */ in freeze_bdev()
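
freeze_bdev() uses bd_fsfreeze_count as a recursion counter under bd_fsfreeze_mutex: only the first freezer actually freezes the superblock, and subsequent callers just bump the count. A reconstruction assembled around the matched lines (the get_super()/get_active_super() plumbing is filled in from the same era and is an assumption):

struct super_block *freeze_bdev(struct block_device *bdev)
{
        struct super_block *sb;
        int error = 0;

        mutex_lock(&bdev->bd_fsfreeze_mutex);
        if (++bdev->bd_fsfreeze_count > 1) {
                /*
                 * We don't even need to grab a reference - the first call
                 * to freeze_bdev grabbed an active reference and only the
                 * last thaw_bdev drops it.
                 */
                sb = get_super(bdev);
                if (sb)
                        drop_super(sb);
                mutex_unlock(&bdev->bd_fsfreeze_mutex);
                return sb;
        }

        sb = get_active_super(bdev);
        if (!sb)
                goto out;
        if (sb->s_op->freeze_super)
                error = sb->s_op->freeze_super(sb);
        else
                error = freeze_super(sb);
        if (error) {
                deactivate_super(sb);
                bdev->bd_fsfreeze_count--;
                mutex_unlock(&bdev->bd_fsfreeze_mutex);
                return ERR_PTR(error);
        }
        deactivate_super(sb);
 out:
        sync_blockdev(bdev);
        mutex_unlock(&bdev->bd_fsfreeze_mutex);
        return sb;      /* thaw_bdev releases s->s_umount */
}
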
613 * thaw_bdev -- unlock filesystem
621 int error = -EINVAL; in thaw_bdev()
623 mutex_lock(&bdev->bd_fsfreeze_mutex); in thaw_bdev()
624 if (!bdev->bd_fsfreeze_count) in thaw_bdev()
628 if (--bdev->bd_fsfreeze_count > 0) in thaw_bdev()
634 if (sb->s_op->thaw_super) in thaw_bdev()
635 error = sb->s_op->thaw_super(sb); in thaw_bdev()
639 bdev->bd_fsfreeze_count++; in thaw_bdev()
641 mutex_unlock(&bdev->bd_fsfreeze_mutex); in thaw_bdev()
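
thaw_bdev() mirrors that: only the transition of bd_fsfreeze_count to zero actually thaws the superblock, and a failed thaw re-increments the count so the device stays frozen. Reconstructed:

int thaw_bdev(struct block_device *bdev, struct super_block *sb)
{
        int error = -EINVAL;

        mutex_lock(&bdev->bd_fsfreeze_mutex);
        if (!bdev->bd_fsfreeze_count)
                goto out;       /* not frozen: -EINVAL */

        error = 0;
        if (--bdev->bd_fsfreeze_count > 0)
                goto out;       /* still frozen by another caller */

        if (!sb)
                goto out;

        if (sb->s_op->thaw_super)
                error = sb->s_op->thaw_super(sb);
        else
                error = thaw_super(sb);
        if (error)
                bdev->bd_fsfreeze_count++;      /* thaw failed: stay frozen */
out:
        mutex_unlock(&bdev->bd_fsfreeze_mutex);
        return error;
}
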
684 * for a block special file, file_inode(file)->i_size is zero in blkdev_fsync()
714 if (error == -EOPNOTSUPP) in blkdev_fsync()
722 * bdev_read_page() - Start reading a page from a block device
732 * queue full; callers should try a different route to read this page rather
740 const struct block_device_operations *ops = bdev->bd_disk->fops; in bdev_read_page()
741 int result = -EOPNOTSUPP; in bdev_read_page()
743 if (!ops->rw_page || bdev_get_integrity(bdev)) in bdev_read_page()
746 result = blk_queue_enter(bdev->bd_disk->queue, 0); in bdev_read_page()
749 result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, in bdev_read_page()
751 blk_queue_exit(bdev->bd_disk->queue); in bdev_read_page()
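
Putting the bdev_read_page() matches together: the function gates on the driver providing ->rw_page and on being able to enter the request queue, then issues the page read through the driver hook. Reconstruction:

int bdev_read_page(struct block_device *bdev, sector_t sector,
                        struct page *page)
{
        const struct block_device_operations *ops = bdev->bd_disk->fops;
        int result = -EOPNOTSUPP;

        /* rw_page is optional and incompatible with integrity metadata */
        if (!ops->rw_page || bdev_get_integrity(bdev))
                return result;

        result = blk_queue_enter(bdev->bd_disk->queue, 0);
        if (result)
                return result;
        result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
                              REQ_OP_READ);
        blk_queue_exit(bdev->bd_disk->queue);
        return result;
}

bdev_write_page() below follows the same shape, plus the page lock/unlock bookkeeping its docstring describes.
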
756 * bdev_write_page() - Start writing a page to a block device
765 * queue the page to the device), the page will still be locked. If the
766 * caller is a ->writepage implementation, it will need to unlock the page.
769 * queue full; callers should try a different route to write this page rather
778 const struct block_device_operations *ops = bdev->bd_disk->fops; in bdev_write_page()
780 if (!ops->rw_page || bdev_get_integrity(bdev)) in bdev_write_page()
781 return -EOPNOTSUPP; in bdev_write_page()
782 result = blk_queue_enter(bdev->bd_disk->queue, 0); in bdev_write_page()
787 result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, in bdev_write_page()
795 blk_queue_exit(bdev->bd_disk->queue); in bdev_write_page()
800 * pseudo-fs
811 return &ei->vfs_inode; in bdev_alloc_inode()
819 static void init_once(void *foo) in init_once() argument
821 struct bdev_inode *ei = (struct bdev_inode *) foo; in init_once()
822 struct block_device *bdev = &ei->bdev; in init_once()
825 mutex_init(&bdev->bd_mutex); in init_once()
827 INIT_LIST_HEAD(&bdev->bd_holder_disks); in init_once()
829 bdev->bd_bdi = &noop_backing_dev_info; in init_once()
830 inode_init_once(&ei->vfs_inode); in init_once()
832 mutex_init(&bdev->bd_fsfreeze_mutex); in init_once()
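
init_once() is a slab constructor: it runs once when an object is first added to the bdev_cache slab, not on every allocation, which is why it only initializes state that must survive free/alloc cycles. It is presumably registered in bdev_cache_init() along these lines (the exact flag set matches this era's tree but is an assumption):

        bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
                        0, (SLAB_HWCACHE_ALIGN | SLAB_RECLAIM_ACCOUNT |
                                SLAB_MEM_SPREAD | SLAB_ACCOUNT | SLAB_PANIC),
                        init_once);
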
837 struct block_device *bdev = &BDEV_I(inode)->bdev; in bdev_evict_inode()
838 truncate_inode_pages_final(&inode->i_data); in bdev_evict_inode()
841 /* Detach inode from wb early as bdi_put() may free bdi->wb */ in bdev_evict_inode()
843 if (bdev->bd_bdi != &noop_backing_dev_info) { in bdev_evict_inode()
844 bdi_put(bdev->bd_bdi); in bdev_evict_inode()
845 bdev->bd_bdi = &noop_backing_dev_info; in bdev_evict_inode()
861 return -ENOMEM; in bd_init_fs_context()
862 fc->s_iflags |= SB_I_CGROUPWB; in bd_init_fs_context()
863 ctx->ops = &bdev_sops; in bd_init_fs_context()
887 panic("Cannot register bdev pseudo-fs"); in bdev_cache_init()
890 panic("Cannot create bdev pseudo-fs"); in bdev_cache_init()
891 blockdev_superblock = bd_mnt->mnt_sb; /* For writeback */ in bdev_cache_init()
895 * Most likely _very_ bad one - but then it's hardly critical for small
906 return BDEV_I(inode)->bdev.bd_dev == *(dev_t *)data; in bdev_test()
911 BDEV_I(inode)->bdev.bd_dev = *(dev_t *)data; in bdev_set()
926 bdev = &BDEV_I(inode)->bdev; in bdget()
928 if (inode->i_state & I_NEW) { in bdget()
929 spin_lock_init(&bdev->bd_size_lock); in bdget()
930 bdev->bd_contains = NULL; in bdget()
931 bdev->bd_super = NULL; in bdget()
932 bdev->bd_inode = inode; in bdget()
933 bdev->bd_part_count = 0; in bdget()
934 inode->i_mode = S_IFBLK; in bdget()
935 inode->i_rdev = dev; in bdget()
936 inode->i_bdev = bdev; in bdget()
937 inode->i_data.a_ops = &def_blk_aops; in bdget()
938 mapping_set_gfp_mask(&inode->i_data, GFP_USER); in bdget()
945 * bdgrab -- Grab a reference to an already referenced block device
950 ihold(bdev->bd_inode); in bdgrab()
965 spin_lock(&blockdev_superblock->s_inode_list_lock); in nr_blockdev_pages()
966 list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) in nr_blockdev_pages()
967 ret += inode->i_mapping->nrpages; in nr_blockdev_pages()
968 spin_unlock(&blockdev_superblock->s_inode_list_lock); in nr_blockdev_pages()
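
The four nr_blockdev_pages() matches are essentially the whole function: it walks every inode on the blockdev pseudo-fs superblock and sums their page-cache footprints under the inode-list lock.

long nr_blockdev_pages(void)
{
        struct inode *inode;
        long ret = 0;

        spin_lock(&blockdev_superblock->s_inode_list_lock);
        list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list)
                ret += inode->i_mapping->nrpages;
        spin_unlock(&blockdev_superblock->s_inode_list_lock);

        return ret;
}
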
975 iput(bdev->bd_inode); in bdput()
985 bdev = inode->i_bdev; in bd_acquire()
986 if (bdev && !inode_unhashed(bdev->bd_inode)) { in bd_acquire()
1002 bdev = bdget(inode->i_rdev); in bd_acquire()
1005 if (!inode->i_bdev) { in bd_acquire()
1009 * So, we can access it via ->i_mapping always in bd_acquire()
1013 inode->i_bdev = bdev; in bd_acquire()
1014 inode->i_mapping = bdev->bd_inode->i_mapping; in bd_acquire()
1028 if (!sb_is_blkdev_sb(inode->i_sb)) in bd_forget()
1029 bdev = inode->i_bdev; in bd_forget()
1030 inode->i_bdev = NULL; in bd_forget()
1031 inode->i_mapping = &inode->i_data; in bd_forget()
1039 * bd_may_claim - test whether a block device can be claimed
1055 if (bdev->bd_holder == holder) in bd_may_claim()
1057 else if (bdev->bd_holder != NULL) in bd_may_claim()
1062 else if (whole->bd_holder == bd_may_claim) in bd_may_claim()
1064 else if (whole->bd_holder != NULL) in bd_may_claim()
1067 return true; /* is a partition of an un-held device */ in bd_may_claim()
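
Line 1062 compares whole->bd_holder against the function pointer bd_may_claim itself. That is intentional, not a typo: bd_finish_claiming() (line 1162 below) stores bd_may_claim as a sentinel holder on the whole device whenever one of its partitions is claimed. The full decision table reconstructs as:

static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
                         void *holder)
{
        if (bdev->bd_holder == holder)
                return true;    /* already a holder */
        else if (bdev->bd_holder != NULL)
                return false;   /* held by someone else */
        else if (whole == bdev)
                return true;    /* is a whole device which isn't held */
        else if (whole->bd_holder == bd_may_claim)
                return true;    /* is a partition of a device being partitioned */
        else if (whole->bd_holder != NULL)
                return false;   /* is a partition of a held device */
        else
                return true;    /* is a partition of an un-held device */
}
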
1071 * bd_prepare_to_claim - claim a block device
1081 * 0 if @bdev can be claimed, -EBUSY otherwise.
1091 return -EBUSY; in bd_prepare_to_claim()
1095 if (whole->bd_claiming) { in bd_prepare_to_claim()
1096 wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0); in bd_prepare_to_claim()
1107 whole->bd_claiming = holder; in bd_prepare_to_claim()
1115 struct gendisk *disk = get_gendisk(bdev->bd_dev, partno); in bdev_get_gendisk()
1127 if (inode_unhashed(bdev->bd_inode)) { in bdev_get_gendisk()
1138 BUG_ON(whole->bd_claiming != holder); in bd_clear_claiming()
1139 whole->bd_claiming = NULL; in bd_clear_claiming()
1140 wake_up_bit(&whole->bd_claiming, 0); in bd_clear_claiming()
1144 * bd_finish_claiming - finish claiming of a block device
1161 whole->bd_holders++; in bd_finish_claiming()
1162 whole->bd_holder = bd_may_claim; in bd_finish_claiming()
1163 bdev->bd_holders++; in bd_finish_claiming()
1164 bdev->bd_holder = holder; in bd_finish_claiming()
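
bd_finish_claiming() is where that sentinel gets written. It commits the claim under bdev_lock (a file-local spinlock, assumed here) and then wakes anyone parked in bd_prepare_to_claim():

static void bd_finish_claiming(struct block_device *bdev,
                struct block_device *whole, void *holder)
{
        spin_lock(&bdev_lock);
        BUG_ON(!bd_may_claim(bdev, whole, holder));
        /*
         * For a whole device, bd_holders is incremented twice and
         * bd_holder passes through the bd_may_claim sentinel before
         * being set to the real holder.
         */
        whole->bd_holders++;
        whole->bd_holder = bd_may_claim;
        bdev->bd_holders++;
        bdev->bd_holder = holder;
        bd_clear_claiming(whole, holder);       /* wakes bit-waitqueue waiters */
        spin_unlock(&bdev_lock);
}
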
1170 * bd_abort_claiming - abort claiming of a block device
1200 list_for_each_entry(holder, &bdev->bd_holder_disks, list) in bd_find_holder_disk()
1201 if (holder->disk == disk) in bd_find_holder_disk()
1217 * bd_link_disk_holder - create symlinks between holding disk and slave bdev
1225 * - from "slaves" directory of the holder @disk to the claimed @bdev
1226 * - from "holders" directory of the @bdev to the holder @disk
1228 * For example, if /dev/dm-0 maps to /dev/sda and disk for dm-0 is
1231 * /sys/block/dm-0/slaves/sda --> /sys/block/sda
1232 * /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
1242 * 0 on success, -errno on failure.
1249 mutex_lock(&bdev->bd_mutex); in bd_link_disk_holder()
1251 WARN_ON_ONCE(!bdev->bd_holder); in bd_link_disk_holder()
1254 if (WARN_ON(!disk->slave_dir || !bdev->bd_part->holder_dir)) in bd_link_disk_holder()
1259 holder->refcnt++; in bd_link_disk_holder()
1265 ret = -ENOMEM; in bd_link_disk_holder()
1269 INIT_LIST_HEAD(&holder->list); in bd_link_disk_holder()
1270 holder->disk = disk; in bd_link_disk_holder()
1271 holder->refcnt = 1; in bd_link_disk_holder()
1273 ret = add_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); in bd_link_disk_holder()
1277 ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj); in bd_link_disk_holder()
1284 kobject_get(bdev->bd_part->holder_dir); in bd_link_disk_holder()
1286 list_add(&holder->list, &bdev->bd_holder_disks); in bd_link_disk_holder()
1290 del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); in bd_link_disk_holder()
1294 mutex_unlock(&bdev->bd_mutex); in bd_link_disk_holder()
1300 * bd_unlink_disk_holder - destroy symlinks created by bd_link_disk_holder()
1313 mutex_lock(&bdev->bd_mutex); in bd_unlink_disk_holder()
1317 if (!WARN_ON_ONCE(holder == NULL) && !--holder->refcnt) { in bd_unlink_disk_holder()
1318 del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); in bd_unlink_disk_holder()
1319 del_symlink(bdev->bd_part->holder_dir, in bd_unlink_disk_holder()
1320 &disk_to_dev(disk)->kobj); in bd_unlink_disk_holder()
1321 kobject_put(bdev->bd_part->holder_dir); in bd_unlink_disk_holder()
1322 list_del_init(&holder->list); in bd_unlink_disk_holder()
1326 mutex_unlock(&bdev->bd_mutex); in bd_unlink_disk_holder()
1332 * check_disk_size_change - checks for disk size change and adjusts bdev size.
1346 spin_lock(&bdev->bd_size_lock); in check_disk_size_change()
1348 bdev_size = i_size_read(bdev->bd_inode); in check_disk_size_change()
1353 disk->disk_name, bdev_size, disk_size); in check_disk_size_change()
1355 i_size_write(bdev->bd_inode, disk_size); in check_disk_size_change()
1357 spin_unlock(&bdev->bd_size_lock); in check_disk_size_change()
1362 disk->disk_name); in check_disk_size_change()
1367 * revalidate_disk_size - checks for disk size change and adjusts bdev size.
1383 if (disk->flags & GENHD_FL_HIDDEN) in revalidate_disk_size()
1396 spin_lock(&bdev->bd_size_lock); in bd_set_nr_sectors()
1397 i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT); in bd_set_nr_sectors()
1398 spin_unlock(&bdev->bd_size_lock); in bd_set_nr_sectors()
1406 struct gendisk *disk = bdev->bd_disk; in bdev_disk_changed()
1409 lockdep_assert_held(&bdev->bd_mutex); in bdev_disk_changed()
1411 if (!(disk->flags & GENHD_FL_UP)) in bdev_disk_changed()
1412 return -ENXIO; in bdev_disk_changed()
1419 clear_bit(GD_NEED_PART_SCAN, &disk->state); in bdev_disk_changed()
1425 * udisks polling for legacy ide-cdrom devices. Use the crude check in bdev_disk_changed()
1431 !(disk->flags & GENHD_FL_REMOVABLE)) in bdev_disk_changed()
1434 if (disk->fops->revalidate_disk) in bdev_disk_changed()
1435 disk->fops->revalidate_disk(disk); in bdev_disk_changed()
1442 if (ret == -EAGAIN) in bdev_disk_changed()
1449 kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE); in bdev_disk_changed()
1463 * mutex_lock(part->bd_mutex)
1464 * mutex_lock_nested(whole->bd_mutex, 1)
1478 ret = -ENXIO; in __blkdev_get()
1486 ret = -ENOMEM; in __blkdev_get()
1503 mutex_lock_nested(&bdev->bd_mutex, for_part); in __blkdev_get()
1504 if (!bdev->bd_openers) { in __blkdev_get()
1506 bdev->bd_disk = disk; in __blkdev_get()
1507 bdev->bd_contains = bdev; in __blkdev_get()
1508 bdev->bd_partno = partno; in __blkdev_get()
1511 ret = -ENXIO; in __blkdev_get()
1512 bdev->bd_part = disk_get_part(disk, partno); in __blkdev_get()
1513 if (!bdev->bd_part) in __blkdev_get()
1517 if (disk->fops->open) { in __blkdev_get()
1518 ret = disk->fops->open(bdev, mode); in __blkdev_get()
1523 if (ret == -ERESTARTSYS) in __blkdev_get()
1534 * if open succeeded or failed with -ENOMEDIUM. in __blkdev_get()
1538 if (test_bit(GD_NEED_PART_SCAN, &disk->state) && in __blkdev_get()
1539 (!ret || ret == -ENOMEDIUM)) in __blkdev_get()
1540 bdev_disk_changed(bdev, ret == -ENOMEDIUM); in __blkdev_get()
1549 bdev->bd_contains = bdgrab(whole); in __blkdev_get()
1550 bdev->bd_part = disk_get_part(disk, partno); in __blkdev_get()
1551 if (!(disk->flags & GENHD_FL_UP) || in __blkdev_get()
1552 !bdev->bd_part || !bdev->bd_part->nr_sects) { in __blkdev_get()
1553 ret = -ENXIO; in __blkdev_get()
1556 bd_set_nr_sectors(bdev, bdev->bd_part->nr_sects); in __blkdev_get()
1560 if (bdev->bd_bdi == &noop_backing_dev_info) in __blkdev_get()
1561 bdev->bd_bdi = bdi_get(disk->queue->backing_dev_info); in __blkdev_get()
1563 if (bdev->bd_contains == bdev) { in __blkdev_get()
1565 if (bdev->bd_disk->fops->open) in __blkdev_get()
1566 ret = bdev->bd_disk->fops->open(bdev, mode); in __blkdev_get()
1568 if (test_bit(GD_NEED_PART_SCAN, &disk->state) && in __blkdev_get()
1569 (!ret || ret == -ENOMEDIUM)) in __blkdev_get()
1570 bdev_disk_changed(bdev, ret == -ENOMEDIUM); in __blkdev_get()
1575 bdev->bd_openers++; in __blkdev_get()
1577 bdev->bd_part_count++; in __blkdev_get()
1587 if (claiming && (mode & FMODE_WRITE) && !bdev->bd_write_holder && in __blkdev_get()
1588 (disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE)) { in __blkdev_get()
1589 bdev->bd_write_holder = true; in __blkdev_get()
1592 mutex_unlock(&bdev->bd_mutex); in __blkdev_get()
1605 disk_put_part(bdev->bd_part); in __blkdev_get()
1606 bdev->bd_disk = NULL; in __blkdev_get()
1607 bdev->bd_part = NULL; in __blkdev_get()
1608 if (bdev != bdev->bd_contains) in __blkdev_get()
1609 __blkdev_put(bdev->bd_contains, mode, 1); in __blkdev_get()
1610 bdev->bd_contains = NULL; in __blkdev_get()
1614 mutex_unlock(&bdev->bd_mutex); in __blkdev_get()
1628 * blkdev_get - open a block device
1644 * 0 on success, -errno on failure.
1654 ret = devcgroup_inode_permission(bdev->bd_inode, perm); in blkdev_get()
1669 * blkdev_get_by_path - open a block device by name
1683 * Pointer to block_device on success, ERR_PTR(-errno) on failure.
1701 return ERR_PTR(-EACCES); in blkdev_get_by_path()
1709 * blkdev_get_by_dev - open a block device by device number
1717 * Use it ONLY if you really do not have anything better - i.e. when
1720 * ever need it - reconsider your API.
1728 * Pointer to block_device on success, ERR_PTR(-errno) on failure.
1737 return ERR_PTR(-ENOMEM); in blkdev_get_by_dev()
1757 filp->f_flags |= O_LARGEFILE; in blkdev_open()
1759 filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC; in blkdev_open()
1761 if (filp->f_flags & O_NDELAY) in blkdev_open()
1762 filp->f_mode |= FMODE_NDELAY; in blkdev_open()
1763 if (filp->f_flags & O_EXCL) in blkdev_open()
1764 filp->f_mode |= FMODE_EXCL; in blkdev_open()
1765 if ((filp->f_flags & O_ACCMODE) == 3) in blkdev_open()
1766 filp->f_mode |= FMODE_WRITE_IOCTL; in blkdev_open()
1770 return -ENOMEM; in blkdev_open()
1772 filp->f_mapping = bdev->bd_inode->i_mapping; in blkdev_open()
1773 filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping); in blkdev_open()
1775 return blkdev_get(bdev, filp->f_mode, filp); in blkdev_open()
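
blkdev_open() is mostly flag translation from f_flags to f_mode. Note the (f_flags & O_ACCMODE) == 3 case: the nonstandard O_WRONLY|O_RDWR combination is mapped to ioctl-only write access. A reconstruction, with the bd_acquire() step between the matched lines filled in from context:

static int blkdev_open(struct inode *inode, struct file *filp)
{
        struct block_device *bdev;

        /* always allow large file access, even if userspace didn't ask */
        filp->f_flags |= O_LARGEFILE;

        filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC;

        if (filp->f_flags & O_NDELAY)
                filp->f_mode |= FMODE_NDELAY;
        if (filp->f_flags & O_EXCL)
                filp->f_mode |= FMODE_EXCL;
        if ((filp->f_flags & O_ACCMODE) == 3)
                filp->f_mode |= FMODE_WRITE_IOCTL;

        bdev = bd_acquire(inode);
        if (bdev == NULL)
                return -ENOMEM;

        filp->f_mapping = bdev->bd_inode->i_mapping;
        filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);

        return blkdev_get(bdev, filp->f_mode, filp);
}
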
1780 struct gendisk *disk = bdev->bd_disk; in __blkdev_put()
1790 if (bdev->bd_openers == 1) in __blkdev_put()
1793 mutex_lock_nested(&bdev->bd_mutex, for_part); in __blkdev_put()
1795 bdev->bd_part_count--; in __blkdev_put()
1797 if (!--bdev->bd_openers) { in __blkdev_put()
1798 WARN_ON_ONCE(bdev->bd_holders); in __blkdev_put()
1804 if (bdev->bd_contains == bdev) { in __blkdev_put()
1805 if (disk->fops->release) in __blkdev_put()
1806 disk->fops->release(disk, mode); in __blkdev_put()
1808 if (!bdev->bd_openers) { in __blkdev_put()
1809 disk_put_part(bdev->bd_part); in __blkdev_put()
1810 bdev->bd_part = NULL; in __blkdev_put()
1811 bdev->bd_disk = NULL; in __blkdev_put()
1812 if (bdev != bdev->bd_contains) in __blkdev_put()
1813 victim = bdev->bd_contains; in __blkdev_put()
1814 bdev->bd_contains = NULL; in __blkdev_put()
1818 mutex_unlock(&bdev->bd_mutex); in __blkdev_put()
1826 mutex_lock(&bdev->bd_mutex); in blkdev_put()
1838 WARN_ON_ONCE(--bdev->bd_holders < 0); in blkdev_put()
1839 WARN_ON_ONCE(--bdev->bd_contains->bd_holders < 0); in blkdev_put()
1842 if ((bdev_free = !bdev->bd_holders)) in blkdev_put()
1843 bdev->bd_holder = NULL; in blkdev_put()
1844 if (!bdev->bd_contains->bd_holders) in blkdev_put()
1845 bdev->bd_contains->bd_holder = NULL; in blkdev_put()
1853 if (bdev_free && bdev->bd_write_holder) { in blkdev_put()
1854 disk_unblock_events(bdev->bd_disk); in blkdev_put()
1855 bdev->bd_write_holder = false; in blkdev_put()
1862 * from userland - e.g. eject(1). in blkdev_put()
1864 disk_flush_events(bdev->bd_disk, DISK_EVENT_MEDIA_CHANGE); in blkdev_put()
1866 mutex_unlock(&bdev->bd_mutex); in blkdev_put()
1875 blkdev_put(bdev, filp->f_mode); in blkdev_close()
1882 fmode_t mode = file->f_mode; in block_ioctl()
1888 if (file->f_flags & O_NDELAY) in block_ioctl()
1905 struct file *file = iocb->ki_filp; in blkdev_write_iter()
1913 return -EPERM; in blkdev_write_iter()
1915 if (IS_SWAPFILE(bd_inode) && !is_hibernate_resume_dev(bd_inode->i_rdev)) in blkdev_write_iter()
1916 return -ETXTBSY; in blkdev_write_iter()
1921 if (iocb->ki_pos >= size) in blkdev_write_iter()
1922 return -ENOSPC; in blkdev_write_iter()
1924 if ((iocb->ki_flags & (IOCB_NOWAIT | IOCB_DIRECT)) == IOCB_NOWAIT) in blkdev_write_iter()
1925 return -EOPNOTSUPP; in blkdev_write_iter()
1927 size -= iocb->ki_pos; in blkdev_write_iter()
1929 shorted = iov_iter_count(from) - size; in blkdev_write_iter()
1945 struct file *file = iocb->ki_filp; in blkdev_read_iter()
1948 loff_t pos = iocb->ki_pos; in blkdev_read_iter()
1955 size -= pos; in blkdev_read_iter()
1957 shorted = iov_iter_count(to) - size; in blkdev_read_iter()
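
blkdev_read_iter() (and blkdev_write_iter() above it) clamp the iterator to the device size instead of letting the generic path run past end-of-device: the iov_iter is truncated before the call and re-expanded after, so the caller's iterator state is preserved. A reconstruction of the read side, as the matched lines imply it reads in this tree:

static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct file *file = iocb->ki_filp;
        struct inode *bd_inode = bdev_file_inode(file);
        loff_t size = i_size_read(bd_inode);
        loff_t pos = iocb->ki_pos;
        size_t shorted = 0;
        ssize_t ret;

        if (pos >= size)
                return 0;       /* read at or past end of device */

        size -= pos;
        if (iov_iter_count(to) > size) {
                shorted = iov_iter_count(to) - size;
                iov_iter_truncate(to, size);
        }

        ret = generic_file_read_iter(iocb, to);
        iov_iter_reexpand(to, iov_iter_count(to) + shorted);
        return ret;
}
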
1993 loff_t end = start + len - 1; in blkdev_fallocate()
1999 return -EOPNOTSUPP; in blkdev_fallocate()
2002 isize = i_size_read(bdev->bd_inode); in blkdev_fallocate()
2004 return -EINVAL; in blkdev_fallocate()
2007 len = isize - start; in blkdev_fallocate()
2008 end = start + len - 1; in blkdev_fallocate()
2010 return -EINVAL; in blkdev_fallocate()
2016 if ((start | len) & (bdev_logical_block_size(bdev) - 1)) in blkdev_fallocate()
2017 return -EINVAL; in blkdev_fallocate()
2020 error = truncate_bdev_range(bdev, file->f_mode, start, end); in blkdev_fallocate()
2039 return -EOPNOTSUPP; in blkdev_fallocate()
2046 * a page, we just discard it - userspace has no way of knowing whether in blkdev_fallocate()
2049 return truncate_bdev_range(bdev, file->f_mode, start, end); in blkdev_fallocate()
2071 * lookup_bdev - lookup a struct block_device by name
2086 return ERR_PTR(-EINVAL); in lookup_bdev()
2093 error = -ENOTBLK; in lookup_bdev()
2094 if (!S_ISBLK(inode->i_mode)) in lookup_bdev()
2096 error = -EACCES; in lookup_bdev()
2099 error = -ENOMEM; in lookup_bdev()
2121 * under us (->put_super runs with the write lock in __invalidate_device()
2137 spin_lock(&blockdev_superblock->s_inode_list_lock); in iterate_bdevs()
2138 list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) { in iterate_bdevs()
2139 struct address_space *mapping = inode->i_mapping; in iterate_bdevs()
2142 spin_lock(&inode->i_lock); in iterate_bdevs()
2143 if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) || in iterate_bdevs()
2144 mapping->nrpages == 0) { in iterate_bdevs()
2145 spin_unlock(&inode->i_lock); in iterate_bdevs()
2149 spin_unlock(&inode->i_lock); in iterate_bdevs()
2150 spin_unlock(&blockdev_superblock->s_inode_list_lock); in iterate_bdevs()
2163 mutex_lock(&bdev->bd_mutex); in iterate_bdevs()
2164 if (bdev->bd_openers) in iterate_bdevs()
2166 mutex_unlock(&bdev->bd_mutex); in iterate_bdevs()
2168 spin_lock(&blockdev_superblock->s_inode_list_lock); in iterate_bdevs()
2170 spin_unlock(&blockdev_superblock->s_inode_list_lock); in iterate_bdevs()