• Home
  • Raw
  • Download

Lines matching references to the identifier `bdev` (auto-generated cross-reference listing; each entry shows the original source line number, the matching code fragment, and the enclosing function)

40 	struct block_device bdev;  member
53 return &BDEV_I(inode)->bdev; in I_BDEV()
57 static void bdev_write_inode(struct block_device *bdev) in bdev_write_inode() argument
59 struct inode *inode = bdev->bd_inode; in bdev_write_inode()
70 bdevname(bdev, name), ret); in bdev_write_inode()
78 static void kill_bdev(struct block_device *bdev) in kill_bdev() argument
80 struct address_space *mapping = bdev->bd_inode->i_mapping; in kill_bdev()
90 void invalidate_bdev(struct block_device *bdev) in invalidate_bdev() argument
92 struct address_space *mapping = bdev->bd_inode->i_mapping; in invalidate_bdev()
110 int truncate_bdev_range(struct block_device *bdev, fmode_t mode, in truncate_bdev_range() argument
122 claimed_bdev = bdev->bd_contains; in truncate_bdev_range()
123 err = bd_prepare_to_claim(bdev, claimed_bdev, in truncate_bdev_range()
128 truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend); in truncate_bdev_range()
130 bd_abort_claiming(bdev, claimed_bdev, truncate_bdev_range); in truncate_bdev_range()
138 return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping, in truncate_bdev_range()
144 static void set_init_blocksize(struct block_device *bdev) in set_init_blocksize() argument
146 unsigned int bsize = bdev_logical_block_size(bdev); in set_init_blocksize()
147 loff_t size = i_size_read(bdev->bd_inode); in set_init_blocksize()
154 bdev->bd_inode->i_blkbits = blksize_bits(bsize); in set_init_blocksize()
157 int set_blocksize(struct block_device *bdev, int size) in set_blocksize() argument
164 if (size < bdev_logical_block_size(bdev)) in set_blocksize()
168 if (bdev->bd_inode->i_blkbits != blksize_bits(size)) { in set_blocksize()
169 sync_blockdev(bdev); in set_blocksize()
170 bdev->bd_inode->i_blkbits = blksize_bits(size); in set_blocksize()
171 kill_bdev(bdev); in set_blocksize()
241 struct block_device *bdev = I_BDEV(bdev_file_inode(file)); in __blkdev_direct_IO_simple() local
250 (bdev_logical_block_size(bdev) - 1)) in __blkdev_direct_IO_simple()
263 bio_set_dev(&bio, bdev); in __blkdev_direct_IO_simple()
294 !blk_poll(bdev_get_queue(bdev), qc, true)) in __blkdev_direct_IO_simple()
329 struct block_device *bdev = I_BDEV(kiocb->ki_filp->f_mapping->host); in blkdev_iopoll() local
330 struct request_queue *q = bdev_get_queue(bdev); in blkdev_iopoll()
379 struct block_device *bdev = I_BDEV(inode); in __blkdev_direct_IO() local
390 (bdev_logical_block_size(bdev) - 1)) in __blkdev_direct_IO()
416 bio_set_dev(bio, bdev); in __blkdev_direct_IO()
490 !blk_poll(bdev_get_queue(bdev), qc, true)) in __blkdev_direct_IO()
524 int __sync_blockdev(struct block_device *bdev, int wait) in __sync_blockdev() argument
526 if (!bdev) in __sync_blockdev()
529 return filemap_flush(bdev->bd_inode->i_mapping); in __sync_blockdev()
530 return filemap_write_and_wait(bdev->bd_inode->i_mapping); in __sync_blockdev()
537 int sync_blockdev(struct block_device *bdev) in sync_blockdev() argument
539 return __sync_blockdev(bdev, 1); in sync_blockdev()
548 int fsync_bdev(struct block_device *bdev) in fsync_bdev() argument
550 struct super_block *sb = get_super(bdev); in fsync_bdev()
556 return sync_blockdev(bdev); in fsync_bdev()
572 int freeze_bdev(struct block_device *bdev) in freeze_bdev() argument
577 mutex_lock(&bdev->bd_fsfreeze_mutex); in freeze_bdev()
578 if (++bdev->bd_fsfreeze_count > 1) in freeze_bdev()
581 sb = get_active_super(bdev); in freeze_bdev()
591 bdev->bd_fsfreeze_count--; in freeze_bdev()
594 bdev->bd_fsfreeze_sb = sb; in freeze_bdev()
597 sync_blockdev(bdev); in freeze_bdev()
599 mutex_unlock(&bdev->bd_fsfreeze_mutex); in freeze_bdev()
610 int thaw_bdev(struct block_device *bdev) in thaw_bdev() argument
615 mutex_lock(&bdev->bd_fsfreeze_mutex); in thaw_bdev()
616 if (!bdev->bd_fsfreeze_count) in thaw_bdev()
620 if (--bdev->bd_fsfreeze_count > 0) in thaw_bdev()
623 sb = bdev->bd_fsfreeze_sb; in thaw_bdev()
632 bdev->bd_fsfreeze_count++; in thaw_bdev()
634 mutex_unlock(&bdev->bd_fsfreeze_mutex); in thaw_bdev()
694 struct block_device *bdev = I_BDEV(bd_inode); in blkdev_fsync() local
706 error = blkdev_issue_flush(bdev, GFP_KERNEL); in blkdev_fsync()
730 int bdev_read_page(struct block_device *bdev, sector_t sector, in bdev_read_page() argument
733 const struct block_device_operations *ops = bdev->bd_disk->fops; in bdev_read_page()
736 if (!ops->rw_page || bdev_get_integrity(bdev)) in bdev_read_page()
739 result = blk_queue_enter(bdev->bd_disk->queue, 0); in bdev_read_page()
742 result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, in bdev_read_page()
744 blk_queue_exit(bdev->bd_disk->queue); in bdev_read_page()
767 int bdev_write_page(struct block_device *bdev, sector_t sector, in bdev_write_page() argument
771 const struct block_device_operations *ops = bdev->bd_disk->fops; in bdev_write_page()
773 if (!ops->rw_page || bdev_get_integrity(bdev)) in bdev_write_page()
775 result = blk_queue_enter(bdev->bd_disk->queue, 0); in bdev_write_page()
780 result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, in bdev_write_page()
788 blk_queue_exit(bdev->bd_disk->queue); in bdev_write_page()
815 struct block_device *bdev = &ei->bdev; in init_once() local
817 memset(bdev, 0, sizeof(*bdev)); in init_once()
818 mutex_init(&bdev->bd_mutex); in init_once()
820 INIT_LIST_HEAD(&bdev->bd_holder_disks); in init_once()
822 bdev->bd_bdi = &noop_backing_dev_info; in init_once()
825 mutex_init(&bdev->bd_fsfreeze_mutex); in init_once()
830 struct block_device *bdev = &BDEV_I(inode)->bdev; in bdev_evict_inode() local
836 if (bdev->bd_bdi != &noop_backing_dev_info) { in bdev_evict_inode()
837 bdi_put(bdev->bd_bdi); in bdev_evict_inode()
838 bdev->bd_bdi = &noop_backing_dev_info; in bdev_evict_inode()
899 return BDEV_I(inode)->bdev.bd_dev == *(dev_t *)data; in bdev_test()
904 BDEV_I(inode)->bdev.bd_dev = *(dev_t *)data; in bdev_set()
910 struct block_device *bdev; in bdget() local
919 bdev = &BDEV_I(inode)->bdev; in bdget()
922 spin_lock_init(&bdev->bd_size_lock); in bdget()
923 bdev->bd_contains = NULL; in bdget()
924 bdev->bd_super = NULL; in bdget()
925 bdev->bd_inode = inode; in bdget()
926 bdev->bd_part_count = 0; in bdget()
929 inode->i_bdev = bdev; in bdget()
934 return bdev; in bdget()
941 struct block_device *bdgrab(struct block_device *bdev) in bdgrab() argument
943 ihold(bdev->bd_inode); in bdgrab()
944 return bdev; in bdgrab()
966 void bdput(struct block_device *bdev) in bdput() argument
968 iput(bdev->bd_inode); in bdput()
975 struct block_device *bdev; in bd_acquire() local
978 bdev = inode->i_bdev; in bd_acquire()
979 if (bdev && !inode_unhashed(bdev->bd_inode)) { in bd_acquire()
980 bdgrab(bdev); in bd_acquire()
982 return bdev; in bd_acquire()
992 if (bdev) in bd_acquire()
995 bdev = bdget(inode->i_rdev); in bd_acquire()
996 if (bdev) { in bd_acquire()
1005 bdgrab(bdev); in bd_acquire()
1006 inode->i_bdev = bdev; in bd_acquire()
1007 inode->i_mapping = bdev->bd_inode->i_mapping; in bd_acquire()
1011 return bdev; in bd_acquire()
1018 struct block_device *bdev = NULL; in bd_forget() local
1022 bdev = inode->i_bdev; in bd_forget()
1027 if (bdev) in bd_forget()
1028 bdput(bdev); in bd_forget()
1045 static bool bd_may_claim(struct block_device *bdev, struct block_device *whole, in bd_may_claim() argument
1048 if (bdev->bd_holder == holder) in bd_may_claim()
1050 else if (bdev->bd_holder != NULL) in bd_may_claim()
1052 else if (whole == bdev) in bd_may_claim()
1076 int bd_prepare_to_claim(struct block_device *bdev, struct block_device *whole, in bd_prepare_to_claim() argument
1082 if (!bd_may_claim(bdev, whole, holder)) { in bd_prepare_to_claim()
1106 static struct gendisk *bdev_get_gendisk(struct block_device *bdev, int *partno) in bdev_get_gendisk() argument
1108 struct gendisk *disk = get_gendisk(bdev->bd_dev, partno); in bdev_get_gendisk()
1120 if (inode_unhashed(bdev->bd_inode)) { in bdev_get_gendisk()
1145 static void bd_finish_claiming(struct block_device *bdev, in bd_finish_claiming() argument
1149 BUG_ON(!bd_may_claim(bdev, whole, holder)); in bd_finish_claiming()
1156 bdev->bd_holders++; in bd_finish_claiming()
1157 bdev->bd_holder = holder; in bd_finish_claiming()
1172 void bd_abort_claiming(struct block_device *bdev, struct block_device *whole, in bd_abort_claiming() argument
1188 static struct bd_holder_disk *bd_find_holder_disk(struct block_device *bdev, in bd_find_holder_disk() argument
1193 list_for_each_entry(holder, &bdev->bd_holder_disks, list) in bd_find_holder_disk()
1237 int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk) in bd_link_disk_holder() argument
1242 mutex_lock(&bdev->bd_mutex); in bd_link_disk_holder()
1244 WARN_ON_ONCE(!bdev->bd_holder); in bd_link_disk_holder()
1247 if (WARN_ON(!disk->slave_dir || !bdev->bd_part->holder_dir)) in bd_link_disk_holder()
1250 holder = bd_find_holder_disk(bdev, disk); in bd_link_disk_holder()
1266 ret = add_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); in bd_link_disk_holder()
1270 ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj); in bd_link_disk_holder()
1277 kobject_get(bdev->bd_part->holder_dir); in bd_link_disk_holder()
1279 list_add(&holder->list, &bdev->bd_holder_disks); in bd_link_disk_holder()
1283 del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); in bd_link_disk_holder()
1287 mutex_unlock(&bdev->bd_mutex); in bd_link_disk_holder()
1302 void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk) in bd_unlink_disk_holder() argument
1306 mutex_lock(&bdev->bd_mutex); in bd_unlink_disk_holder()
1308 holder = bd_find_holder_disk(bdev, disk); in bd_unlink_disk_holder()
1311 del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); in bd_unlink_disk_holder()
1312 del_symlink(bdev->bd_part->holder_dir, in bd_unlink_disk_holder()
1314 kobject_put(bdev->bd_part->holder_dir); in bd_unlink_disk_holder()
1319 mutex_unlock(&bdev->bd_mutex); in bd_unlink_disk_holder()
1335 struct block_device *bdev, bool verbose) in check_disk_size_change() argument
1339 spin_lock(&bdev->bd_size_lock); in check_disk_size_change()
1341 bdev_size = i_size_read(bdev->bd_inode); in check_disk_size_change()
1348 i_size_write(bdev->bd_inode, disk_size); in check_disk_size_change()
1350 spin_unlock(&bdev->bd_size_lock); in check_disk_size_change()
1353 if (__invalidate_device(bdev, false)) in check_disk_size_change()
1370 struct block_device *bdev; in revalidate_disk_size() local
1379 bdev = bdget_disk(disk, 0); in revalidate_disk_size()
1380 if (bdev) { in revalidate_disk_size()
1381 check_disk_size_change(disk, bdev, verbose); in revalidate_disk_size()
1382 bdput(bdev); in revalidate_disk_size()
1387 void bd_set_nr_sectors(struct block_device *bdev, sector_t sectors) in bd_set_nr_sectors() argument
1389 spin_lock(&bdev->bd_size_lock); in bd_set_nr_sectors()
1390 i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT); in bd_set_nr_sectors()
1391 spin_unlock(&bdev->bd_size_lock); in bd_set_nr_sectors()
1395 static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part);
1397 int bdev_disk_changed(struct block_device *bdev, bool invalidate) in bdev_disk_changed() argument
1399 struct gendisk *disk = bdev->bd_disk; in bdev_disk_changed()
1402 lockdep_assert_held(&bdev->bd_mutex); in bdev_disk_changed()
1408 ret = blk_drop_partitions(bdev); in bdev_disk_changed()
1431 check_disk_size_change(disk, bdev, !invalidate); in bdev_disk_changed()
1434 ret = blk_add_partitions(disk, bdev); in bdev_disk_changed()
1460 static int __blkdev_get(struct block_device *bdev, fmode_t mode, void *holder, in __blkdev_get() argument
1472 disk = bdev_get_gendisk(bdev, &partno); in __blkdev_get()
1489 claiming = bdev; in __blkdev_get()
1490 ret = bd_prepare_to_claim(bdev, claiming, holder); in __blkdev_get()
1496 mutex_lock_nested(&bdev->bd_mutex, for_part); in __blkdev_get()
1497 if (!bdev->bd_openers) { in __blkdev_get()
1499 bdev->bd_disk = disk; in __blkdev_get()
1500 bdev->bd_contains = bdev; in __blkdev_get()
1501 bdev->bd_partno = partno; in __blkdev_get()
1505 bdev->bd_part = disk_get_part(disk, partno); in __blkdev_get()
1506 if (!bdev->bd_part) in __blkdev_get()
1511 ret = disk->fops->open(bdev, mode); in __blkdev_get()
1521 bd_set_nr_sectors(bdev, get_capacity(disk)); in __blkdev_get()
1522 set_init_blocksize(bdev); in __blkdev_get()
1533 bdev_disk_changed(bdev, ret == -ENOMEDIUM); in __blkdev_get()
1542 bdev->bd_contains = bdgrab(whole); in __blkdev_get()
1543 bdev->bd_part = disk_get_part(disk, partno); in __blkdev_get()
1545 !bdev->bd_part || !bdev->bd_part->nr_sects) { in __blkdev_get()
1549 bd_set_nr_sectors(bdev, bdev->bd_part->nr_sects); in __blkdev_get()
1550 set_init_blocksize(bdev); in __blkdev_get()
1553 if (bdev->bd_bdi == &noop_backing_dev_info) in __blkdev_get()
1554 bdev->bd_bdi = bdi_get(disk->queue->backing_dev_info); in __blkdev_get()
1556 if (bdev->bd_contains == bdev) { in __blkdev_get()
1558 if (bdev->bd_disk->fops->open) in __blkdev_get()
1559 ret = bdev->bd_disk->fops->open(bdev, mode); in __blkdev_get()
1563 bdev_disk_changed(bdev, ret == -ENOMEDIUM); in __blkdev_get()
1568 bdev->bd_openers++; in __blkdev_get()
1570 bdev->bd_part_count++; in __blkdev_get()
1572 bd_finish_claiming(bdev, claiming, holder); in __blkdev_get()
1580 if (claiming && (mode & FMODE_WRITE) && !bdev->bd_write_holder && in __blkdev_get()
1582 bdev->bd_write_holder = true; in __blkdev_get()
1585 mutex_unlock(&bdev->bd_mutex); in __blkdev_get()
1598 disk_put_part(bdev->bd_part); in __blkdev_get()
1599 bdev->bd_disk = NULL; in __blkdev_get()
1600 bdev->bd_part = NULL; in __blkdev_get()
1601 if (bdev != bdev->bd_contains) in __blkdev_get()
1602 __blkdev_put(bdev->bd_contains, mode, 1); in __blkdev_get()
1603 bdev->bd_contains = NULL; in __blkdev_get()
1606 bd_abort_claiming(bdev, claiming, holder); in __blkdev_get()
1607 mutex_unlock(&bdev->bd_mutex); in __blkdev_get()
1639 static int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder) in blkdev_get() argument
1647 ret = devcgroup_inode_permission(bdev->bd_inode, perm); in blkdev_get()
1651 ret =__blkdev_get(bdev, mode, holder, 0); in blkdev_get()
1657 bdput(bdev); in blkdev_get()
1681 struct block_device *bdev; in blkdev_get_by_path() local
1684 bdev = lookup_bdev(path); in blkdev_get_by_path()
1685 if (IS_ERR(bdev)) in blkdev_get_by_path()
1686 return bdev; in blkdev_get_by_path()
1688 err = blkdev_get(bdev, mode, holder); in blkdev_get_by_path()
1692 if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) { in blkdev_get_by_path()
1693 blkdev_put(bdev, mode); in blkdev_get_by_path()
1697 return bdev; in blkdev_get_by_path()
1725 struct block_device *bdev; in blkdev_get_by_dev() local
1728 bdev = bdget(dev); in blkdev_get_by_dev()
1729 if (!bdev) in blkdev_get_by_dev()
1732 err = blkdev_get(bdev, mode, holder); in blkdev_get_by_dev()
1736 return bdev; in blkdev_get_by_dev()
1742 struct block_device *bdev; in blkdev_open() local
1761 bdev = bd_acquire(inode); in blkdev_open()
1762 if (bdev == NULL) in blkdev_open()
1765 filp->f_mapping = bdev->bd_inode->i_mapping; in blkdev_open()
1768 return blkdev_get(bdev, filp->f_mode, filp); in blkdev_open()
1771 static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part) in __blkdev_put() argument
1773 struct gendisk *disk = bdev->bd_disk; in __blkdev_put()
1783 if (bdev->bd_openers == 1) in __blkdev_put()
1784 sync_blockdev(bdev); in __blkdev_put()
1786 mutex_lock_nested(&bdev->bd_mutex, for_part); in __blkdev_put()
1788 bdev->bd_part_count--; in __blkdev_put()
1790 if (!--bdev->bd_openers) { in __blkdev_put()
1791 WARN_ON_ONCE(bdev->bd_holders); in __blkdev_put()
1792 sync_blockdev(bdev); in __blkdev_put()
1793 kill_bdev(bdev); in __blkdev_put()
1795 bdev_write_inode(bdev); in __blkdev_put()
1797 if (bdev->bd_contains == bdev) { in __blkdev_put()
1801 if (!bdev->bd_openers) { in __blkdev_put()
1802 disk_put_part(bdev->bd_part); in __blkdev_put()
1803 bdev->bd_part = NULL; in __blkdev_put()
1804 bdev->bd_disk = NULL; in __blkdev_put()
1805 if (bdev != bdev->bd_contains) in __blkdev_put()
1806 victim = bdev->bd_contains; in __blkdev_put()
1807 bdev->bd_contains = NULL; in __blkdev_put()
1811 mutex_unlock(&bdev->bd_mutex); in __blkdev_put()
1812 bdput(bdev); in __blkdev_put()
1817 void blkdev_put(struct block_device *bdev, fmode_t mode) in blkdev_put() argument
1819 mutex_lock(&bdev->bd_mutex); in blkdev_put()
1831 WARN_ON_ONCE(--bdev->bd_holders < 0); in blkdev_put()
1832 WARN_ON_ONCE(--bdev->bd_contains->bd_holders < 0); in blkdev_put()
1835 if ((bdev_free = !bdev->bd_holders)) in blkdev_put()
1836 bdev->bd_holder = NULL; in blkdev_put()
1837 if (!bdev->bd_contains->bd_holders) in blkdev_put()
1838 bdev->bd_contains->bd_holder = NULL; in blkdev_put()
1846 if (bdev_free && bdev->bd_write_holder) { in blkdev_put()
1847 disk_unblock_events(bdev->bd_disk); in blkdev_put()
1848 bdev->bd_write_holder = false; in blkdev_put()
1857 disk_flush_events(bdev->bd_disk, DISK_EVENT_MEDIA_CHANGE); in blkdev_put()
1859 mutex_unlock(&bdev->bd_mutex); in blkdev_put()
1861 __blkdev_put(bdev, mode, 0); in blkdev_put()
1867 struct block_device *bdev = I_BDEV(bdev_file_inode(filp)); in blkdev_close() local
1868 blkdev_put(bdev, filp->f_mode); in blkdev_close()
1874 struct block_device *bdev = I_BDEV(bdev_file_inode(file)); in block_ioctl() local
1886 return blkdev_ioctl(bdev, mode, cmd, arg); in block_ioctl()
1966 struct super_block *super = BDEV_I(page->mapping->host)->bdev.bd_super; in blkdev_releasepage()
2000 struct block_device *bdev = I_BDEV(bdev_file_inode(file)); in blkdev_fallocate() local
2010 isize = i_size_read(bdev->bd_inode); in blkdev_fallocate()
2024 if ((start | len) & (bdev_logical_block_size(bdev) - 1)) in blkdev_fallocate()
2034 error = truncate_bdev_range(bdev, file->f_mode, start, end); in blkdev_fallocate()
2038 error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9, in blkdev_fallocate()
2042 error = truncate_bdev_range(bdev, file->f_mode, start, end); in blkdev_fallocate()
2046 error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9, in blkdev_fallocate()
2050 error = truncate_bdev_range(bdev, file->f_mode, start, end); in blkdev_fallocate()
2054 error = blkdev_issue_discard(bdev, start >> 9, len >> 9, in blkdev_fallocate()
2068 return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping, in blkdev_fallocate()
2101 struct block_device *bdev; in lookup_bdev() local
2121 bdev = bd_acquire(inode); in lookup_bdev()
2122 if (!bdev) in lookup_bdev()
2126 return bdev; in lookup_bdev()
2128 bdev = ERR_PTR(error); in lookup_bdev()
2133 int __invalidate_device(struct block_device *bdev, bool kill_dirty) in __invalidate_device() argument
2135 struct super_block *sb = get_super(bdev); in __invalidate_device()
2149 invalidate_bdev(bdev); in __invalidate_device()
2161 struct block_device *bdev; in iterate_bdevs() local
2182 bdev = I_BDEV(inode); in iterate_bdevs()
2184 mutex_lock(&bdev->bd_mutex); in iterate_bdevs()
2185 if (bdev->bd_openers) in iterate_bdevs()
2186 func(bdev, arg); in iterate_bdevs()
2187 mutex_unlock(&bdev->bd_mutex); in iterate_bdevs()