
Searched refs:bdev (Results 1 – 25 of 59) sorted by relevance


/fs/
block_dev.c
36 struct block_device bdev; member
49 return &BDEV_I(inode)->bdev; in I_BDEV()
53 static void bdev_write_inode(struct block_device *bdev) in bdev_write_inode() argument
55 struct inode *inode = bdev->bd_inode; in bdev_write_inode()
66 bdevname(bdev, name), ret); in bdev_write_inode()
74 void kill_bdev(struct block_device *bdev) in kill_bdev() argument
76 struct address_space *mapping = bdev->bd_inode->i_mapping; in kill_bdev()
87 void invalidate_bdev(struct block_device *bdev) in invalidate_bdev() argument
89 struct address_space *mapping = bdev->bd_inode->i_mapping; in invalidate_bdev()
103 int set_blocksize(struct block_device *bdev, int size) in set_blocksize() argument
[all …]
super.c
606 struct super_block *get_super(struct block_device *bdev) in get_super() argument
610 if (!bdev) in get_super()
618 if (sb->s_bdev == bdev) { in get_super()
647 struct super_block *get_super_thawed(struct block_device *bdev) in get_super_thawed() argument
650 struct super_block *s = get_super(bdev); in get_super_thawed()
669 struct super_block *get_active_super(struct block_device *bdev) in get_active_super() argument
673 if (!bdev) in get_active_super()
681 if (sb->s_bdev == bdev) { in get_active_super()
998 struct block_device *bdev; in mount_bdev() local
1006 bdev = blkdev_get_by_path(dev_name, mode, fs_type); in mount_bdev()
[all …]
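
The super.c hits trace the usual round trip between a path, its block_device, and the super_block mounted on it: mount_bdev() opens the device with blkdev_get_by_path(), while get_super()/get_super_thawed() walk back from a block_device to the super_block whose s_bdev matches. A minimal sketch of that pairing; the helper name and the simplified error handling are illustrative, not code from super.c:

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/err.h>
#include <linux/printk.h>

/* Hypothetical helper: open a device by path, report any mounted filesystem. */
static int example_probe_bdev(const char *dev_name, void *holder)
{
	struct block_device *bdev;
	struct super_block *sb;

	/* FMODE_EXCL needs a non-NULL holder; mount_bdev() passes the fs_type. */
	bdev = blkdev_get_by_path(dev_name,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL, holder);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	/* get_super() returns a referenced super_block with s_bdev == bdev, or NULL. */
	sb = get_super(bdev);
	if (sb) {
		pr_info("%s is mounted as %s\n", dev_name, sb->s_type->name);
		drop_super(sb);		/* balances the reference from get_super() */
	}

	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
	return 0;
}
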
mbcache.c
396 mb_cache_shrink(struct block_device *bdev) in mb_cache_shrink() argument
407 if (ce->e_bdev == bdev) { in mb_cache_shrink()
579 mb_cache_entry_insert(struct mb_cache_entry *ce, struct block_device *bdev, in mb_cache_entry_insert() argument
590 bucket = hash_long((unsigned long)bdev + (block & 0xffffffff), in mb_cache_entry_insert()
595 if (lce->e_bdev == bdev && lce->e_block == block) { in mb_cache_entry_insert()
603 ce->e_bdev = bdev; in mb_cache_entry_insert()
661 mb_cache_entry_get(struct mb_cache *cache, struct block_device *bdev, in mb_cache_entry_get() argument
669 bucket = hash_long((unsigned long)bdev + (block & 0xffffffff), in mb_cache_entry_get()
676 if (ce->e_bdev == bdev && ce->e_block == block) { in mb_cache_entry_get()
720 struct block_device *bdev, unsigned int key) in __mb_cache_entry_find() argument
[all …]
mpage.c
103 mpage_alloc(struct block_device *bdev, in mpage_alloc() argument
117 bio->bi_bdev = bdev; in mpage_alloc()
191 struct block_device *bdev = NULL; in do_mpage_readpage() local
228 bdev = map_bh->b_bdev; in do_mpage_readpage()
283 bdev = map_bh->b_bdev; in do_mpage_readpage()
312 if (!bdev_read_page(bdev, blocks[0] << (blkbits - 9), in do_mpage_readpage()
316 bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9), in do_mpage_readpage()
516 struct block_device *bdev = NULL; in __mpage_writepage() local
561 bdev = bh->b_bdev; in __mpage_writepage()
602 bdev = map_bh.b_bdev; in __mpage_writepage()
[all …]
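
The mpage.c hits show the pre-4.14 way of aiming a bio at a device: mpage_alloc() stores the block_device in bio->bi_bdev and converts the filesystem block number into a 512-byte sector for bi_iter.bi_sector. A condensed sketch of that allocation step (the function name is illustrative):

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Sketch: allocate a bio pointed at bdev, starting at the given fs block. */
static struct bio *example_mpage_bio(struct block_device *bdev, sector_t block,
				     unsigned int blkbits, unsigned int nr_vecs)
{
	struct bio *bio = bio_alloc(GFP_NOFS, nr_vecs);

	if (bio) {
		bio->bi_bdev = bdev;				 /* target device */
		bio->bi_iter.bi_sector = block << (blkbits - 9); /* block -> sector */
	}
	return bio;
}
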
buffer.c
202 __find_get_block_slow(struct block_device *bdev, sector_t block) in __find_get_block_slow() argument
204 struct inode *bd_inode = bdev->bd_inode; in __find_get_block_slow()
248 printk("device %s blocksize: %d\n", bdevname(bdev, b), in __find_get_block_slow()
592 void write_boundary_block(struct block_device *bdev, in write_boundary_block() argument
595 struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize); in write_boundary_block()
942 static sector_t blkdev_max_block(struct block_device *bdev, unsigned int size) in blkdev_max_block() argument
945 loff_t sz = i_size_read(bdev->bd_inode); in blkdev_max_block()
958 init_page_buffers(struct page *page, struct block_device *bdev, in init_page_buffers() argument
964 sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode), size); in init_page_buffers()
969 bh->b_bdev = bdev; in init_page_buffers()
[all …]
sync.c
82 static void fdatawrite_one_bdev(struct block_device *bdev, void *arg) in fdatawrite_one_bdev() argument
84 filemap_fdatawrite(bdev->bd_inode->i_mapping); in fdatawrite_one_bdev()
87 static void fdatawait_one_bdev(struct block_device *bdev, void *arg) in fdatawait_one_bdev() argument
94 filemap_fdatawait_keep_errors(bdev->bd_inode->i_mapping); in fdatawait_one_bdev()
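
fdatawrite_one_bdev() and fdatawait_one_bdev() are the two halves of the sync path's per-device flush, and both reach the device's page cache through bdev->bd_inode->i_mapping. Collapsed into one place for a single device, the pattern looks roughly like this (sync.c keeps the phases separate so writeback starts on every device before any of them is waited on):

#include <linux/fs.h>

/* Sketch: flush one block device's page cache, write then wait. */
static int example_flush_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;
	int ret;

	ret = filemap_fdatawrite(mapping);	/* start writeback */
	if (ret)
		return ret;
	return filemap_fdatawait(mapping);	/* wait for it to complete */
}
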
direct-io.c
359 struct block_device *bdev, in dio_bio_alloc() argument
370 bio->bi_bdev = bdev; in dio_bio_alloc()
1111 struct block_device *bdev, struct iov_iter *iter, in do_blockdev_direct_IO() argument
1133 if (bdev) in do_blockdev_direct_IO()
1134 blkbits = blksize_bits(bdev_logical_block_size(bdev)); in do_blockdev_direct_IO()
1326 struct block_device *bdev, struct iov_iter *iter, in __blockdev_direct_IO() argument
1339 prefetch(&bdev->bd_disk->part_tbl); in __blockdev_direct_IO()
1340 prefetch(bdev->bd_queue); in __blockdev_direct_IO()
1341 prefetch((char *)bdev->bd_queue + SMP_CACHE_BYTES); in __blockdev_direct_IO()
1343 return do_blockdev_direct_IO(iocb, inode, bdev, iter, offset, get_block, in __blockdev_direct_IO()
internal.h
25 extern int __sync_blockdev(struct block_device *bdev, int wait);
32 static inline int __sync_blockdev(struct block_device *bdev, int wait) in __sync_blockdev() argument
/fs/logfs/
dev_bdev.c
17 static int sync_request(struct page *page, struct block_device *bdev, int rw) in sync_request() argument
29 bio.bi_bdev = bdev; in sync_request()
39 struct block_device *bdev = logfs_super(sb)->s_bdev; in bdev_readpage() local
42 err = sync_request(page, bdev, READ); in bdev_readpage()
269 struct block_device *bdev = logfs_super(sb)->s_bdev; in bdev_write_sb() local
272 return sync_request(page, bdev, WRITE); in bdev_write_sb()
300 struct block_device *bdev; in logfs_get_sb_bdev() local
302 bdev = blkdev_get_by_path(devname, FMODE_READ|FMODE_WRITE|FMODE_EXCL, in logfs_get_sb_bdev()
304 if (IS_ERR(bdev)) in logfs_get_sb_bdev()
305 return PTR_ERR(bdev); in logfs_get_sb_bdev()
[all …]
/fs/udf/
lowlevel.c
32 struct block_device *bdev = sb->s_bdev; in udf_get_last_session() local
37 i = ioctl_by_bdev(bdev, CDROMMULTISESSION, (unsigned long)&ms_info); in udf_get_last_session()
52 struct block_device *bdev = sb->s_bdev; in udf_get_last_block() local
59 if (ioctl_by_bdev(bdev, CDROM_LAST_WRITTEN, (unsigned long) &lblock) || in udf_get_last_block()
61 lblock = bdev->bd_inode->i_size >> sb->s_blocksize_bits; in udf_get_last_block()
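
udf_get_last_block() is a compact example of ioctl_by_bdev(): issue a CD-ROM ioctl against the superblock's device from kernel context, and fall back to the size recorded on the bdev inode if the query fails. A trimmed sketch of that fallback (not a verbatim copy of lowlevel.c):

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/cdrom.h>

/* Sketch: last addressable block of sb's device, preferring the CD-ROM ioctl. */
static unsigned long example_last_block(struct super_block *sb)
{
	struct block_device *bdev = sb->s_bdev;
	long lblock = 0;

	if (ioctl_by_bdev(bdev, CDROM_LAST_WRITTEN, (unsigned long)&lblock) || !lblock)
		lblock = bdev->bd_inode->i_size >> sb->s_blocksize_bits;

	return lblock;
}
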
/fs/nfs/blocklayout/
dev.c
24 if (dev->bdev) in bl_free_device()
25 blkdev_put(dev->bdev, FMODE_READ | FMODE_WRITE); in bl_free_device()
130 map->bdev = dev->bdev; in bl_map_simple()
203 d->bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_WRITE, NULL); in bl_parse_simple()
204 if (IS_ERR(d->bdev)) { in bl_parse_simple()
206 MAJOR(dev), MINOR(dev), PTR_ERR(d->bdev)); in bl_parse_simple()
207 return PTR_ERR(d->bdev); in bl_parse_simple()
211 d->len = i_size_read(d->bdev->bd_inode); in bl_parse_simple()
215 d->bdev->bd_disk->disk_name); in bl_parse_simple()
blocklayout.h
93 struct block_device *bdev; member
106 struct block_device *bdev; member
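
bl_parse_simple() shows the dev_t flavour of device open: blkdev_get_by_dev() instead of a path lookup, with the usable length read straight off the bdev inode via i_size_read(). A sketch of that open/measure/release sequence, stripped of the pNFS specifics:

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/err.h>

/* Sketch: open a block device by dev_t and return its size in bytes. */
static loff_t example_bdev_size(dev_t dev)
{
	struct block_device *bdev;
	loff_t len;

	bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_WRITE, NULL);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	len = i_size_read(bdev->bd_inode);	/* device size in bytes */

	blkdev_put(bdev, FMODE_READ | FMODE_WRITE);
	return len;
}
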
/fs/btrfs/
volumes.c
193 static void btrfs_kobject_uevent(struct block_device *bdev, in btrfs_kobject_uevent() argument
198 ret = kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, action); in btrfs_kobject_uevent()
202 kobject_name(&disk_to_dev(bdev->bd_disk)->kobj), in btrfs_kobject_uevent()
203 &disk_to_dev(bdev->bd_disk)->kobj); in btrfs_kobject_uevent()
269 int flush, struct block_device **bdev, in btrfs_get_bdev_and_sb() argument
274 *bdev = blkdev_get_by_path(device_path, flags, holder); in btrfs_get_bdev_and_sb()
276 if (IS_ERR(*bdev)) { in btrfs_get_bdev_and_sb()
277 ret = PTR_ERR(*bdev); in btrfs_get_bdev_and_sb()
282 filemap_write_and_wait((*bdev)->bd_inode->i_mapping); in btrfs_get_bdev_and_sb()
283 ret = set_blocksize(*bdev, 4096); in btrfs_get_bdev_and_sb()
[all …]
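
btrfs_get_bdev_and_sb() adds two steps after the open that the super.c sketch above left out: flush any dirty page cache for the device with filemap_write_and_wait(), then pin the soft block size to 4 KiB with set_blocksize() before reading the superblock. Roughly (same hedges as before; the helper name is made up):

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/err.h>

/* Sketch: open a device and prepare it for 4 KiB buffered metadata access. */
static struct block_device *example_open_prepared(const char *path, fmode_t mode,
						  void *holder)
{
	struct block_device *bdev;
	int ret;

	bdev = blkdev_get_by_path(path, mode, holder);
	if (IS_ERR(bdev))
		return bdev;

	/* Push out anything cached under the old block size ... */
	filemap_write_and_wait(bdev->bd_inode->i_mapping);
	/* ... then switch the block size used for buffered access. */
	ret = set_blocksize(bdev, 4096);
	if (ret) {
		blkdev_put(bdev, mode);
		return ERR_PTR(ret);
	}
	return bdev;
}
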
check-integrity.c
199 struct block_device *bdev; member
276 struct block_device *bdev,
298 struct block_device *bdev,
387 struct block_device *bdev);
473 ds->bdev = NULL; in btrfsic_dev_state_init()
514 ((unsigned int)((uintptr_t)b->dev_state->bdev))) & in btrfsic_block_hashtable_add()
526 struct block_device *bdev, in btrfsic_block_hashtable_lookup() argument
532 ((unsigned int)((uintptr_t)bdev))) & in btrfsic_block_hashtable_lookup()
541 if (b->dev_state->bdev == bdev && b->dev_bytenr == dev_bytenr) in btrfsic_block_hashtable_lookup()
564 ((unsigned int)((uintptr_t)l->block_ref_to->dev_state->bdev)) ^ in btrfsic_block_link_hashtable_add()
[all …]
compression.c
97 static struct bio *compressed_bio_alloc(struct block_device *bdev, in compressed_bio_alloc() argument
100 return btrfs_bio_alloc(bdev, first_byte >> 9, BIO_MAX_PAGES, gfp_flags); in compressed_bio_alloc()
340 struct block_device *bdev; in btrfs_submit_compressed_write() local
359 bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; in btrfs_submit_compressed_write()
361 bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS); in btrfs_submit_compressed_write()
409 bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS); in btrfs_submit_compressed_write()
575 struct block_device *bdev; in btrfs_submit_compressed_read() local
626 bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; in btrfs_submit_compressed_read()
650 comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS); in btrfs_submit_compressed_read()
703 comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, in btrfs_submit_compressed_read()
sysfs.c
649 if (one_device && one_device->bdev) { in btrfs_sysfs_rm_device_link()
650 disk = one_device->bdev->bd_part; in btrfs_sysfs_rm_device_link()
662 if (!one_device->bdev) in btrfs_sysfs_rm_device_link()
664 disk = one_device->bdev->bd_part; in btrfs_sysfs_rm_device_link()
696 if (!dev->bdev) in btrfs_sysfs_add_device_link()
702 disk = dev->bdev->bd_part; in btrfs_sysfs_add_device_link()
disk-io.h
62 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev);
63 int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num,
dev-replace.c
570 if (fs_info->sb->s_bdev == src_device->bdev) in btrfs_dev_replace_finishing()
571 fs_info->sb->s_bdev = tgt_device->bdev; in btrfs_dev_replace_finishing()
572 if (fs_info->fs_devices->latest_bdev == src_device->bdev) in btrfs_dev_replace_finishing()
573 fs_info->fs_devices->latest_bdev = tgt_device->bdev; in btrfs_dev_replace_finishing()
798 if (!dev_replace->tgtdev || !dev_replace->tgtdev->bdev) { in btrfs_resume_dev_replace_async()
/fs/f2fs/
segment.c
519 struct block_device *bdev) in __submit_flush_wait() argument
525 bio->bi_bdev = bdev; in __submit_flush_wait()
529 trace_f2fs_issue_flush(bdev, test_opt(sbi, NOBARRIER), in __submit_flush_wait()
545 ret = __submit_flush_wait(sbi, FDEV(i).bdev); in submit_flush_wait()
717 ret = __submit_flush_wait(sbi, FDEV(i).bdev); in f2fs_flush_device_cache()
806 struct block_device *bdev, block_t lstart, in __create_discard_cmd() argument
819 dc->bdev = bdev; in __create_discard_cmd()
835 struct block_device *bdev, block_t lstart, in __attach_discard_cmd() argument
842 dc = __create_discard_cmd(sbi, bdev, lstart, start, len); in __attach_discard_cmd()
870 trace_f2fs_remove_discard(dc->bdev, dc->start, dc->len); in __remove_discard_cmd()
[all …]
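
__submit_flush_wait() builds and submits a cache-flush bio by hand for each device in FDEV(). The generic block-layer helper that achieves the same thing for one device is blkdev_issue_flush(); the sketch below uses the three-argument form those 4.x kernels exported (the helper name and device-array shape are illustrative):

#include <linux/blkdev.h>

/* Sketch: flush the volatile write cache of every device in an array. */
static int example_flush_devices(struct block_device **bdevs, int nr)
{
	int i, err, ret = 0;

	for (i = 0; i < nr; i++) {
		err = blkdev_issue_flush(bdevs[i], GFP_KERNEL, NULL);
		if (err && !ret)
			ret = err;	/* report the first failure */
	}
	return ret;
}
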
/fs/gfs2/
ops_fstype.c
1235 struct block_device *bdev = ptr; in test_gfs2_super() local
1236 return (bdev == s->s_bdev); in test_gfs2_super()
1256 struct block_device *bdev; in gfs2_mount() local
1266 bdev = blkdev_get_by_path(dev_name, mode, fs_type); in gfs2_mount()
1267 if (IS_ERR(bdev)) in gfs2_mount()
1268 return ERR_CAST(bdev); in gfs2_mount()
1275 mutex_lock(&bdev->bd_fsfreeze_mutex); in gfs2_mount()
1276 if (bdev->bd_fsfreeze_count > 0) { in gfs2_mount()
1277 mutex_unlock(&bdev->bd_fsfreeze_mutex); in gfs2_mount()
1281 s = sget(fs_type, test_gfs2_super, set_gfs2_super, flags, bdev); in gfs2_mount()
[all …]
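
gfs2_mount() (and nilfs_mount() just below) refuses to proceed while the backing device is frozen: it takes bd_fsfreeze_mutex, checks bd_fsfreeze_count, and only then hands the block_device to sget() as the comparison key for test_gfs2_super(). The guard on its own looks like this (sketch; the sget() call and its callbacks are omitted):

#include <linux/fs.h>
#include <linux/errno.h>

/* Sketch: fail a mount attempt if freeze_bdev() is outstanding on the device. */
static int example_check_not_frozen(struct block_device *bdev)
{
	int ret = 0;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (bdev->bd_fsfreeze_count > 0)
		ret = -EBUSY;
	mutex_unlock(&bdev->bd_fsfreeze_mutex);

	return ret;
}
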
/fs/nilfs2/
super.c
1215 struct block_device *bdev; member
1288 sd.bdev = blkdev_get_by_path(dev_name, mode, fs_type); in nilfs_mount()
1289 if (IS_ERR(sd.bdev)) in nilfs_mount()
1290 return ERR_CAST(sd.bdev); in nilfs_mount()
1304 mutex_lock(&sd.bdev->bd_fsfreeze_mutex); in nilfs_mount()
1305 if (sd.bdev->bd_fsfreeze_count > 0) { in nilfs_mount()
1306 mutex_unlock(&sd.bdev->bd_fsfreeze_mutex); in nilfs_mount()
1311 sd.bdev); in nilfs_mount()
1312 mutex_unlock(&sd.bdev->bd_fsfreeze_mutex); in nilfs_mount()
1325 strlcpy(s->s_id, bdevname(sd.bdev, b), sizeof(s->s_id)); in nilfs_mount()
[all …]
/fs/jfs/
jfs_logmgr.c
1082 struct block_device *bdev; in lmLogOpen() local
1094 if (log->bdev->bd_dev == sbi->logdev) { in lmLogOpen()
1125 bdev = blkdev_get_by_dev(sbi->logdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, in lmLogOpen()
1127 if (IS_ERR(bdev)) { in lmLogOpen()
1128 rc = PTR_ERR(bdev); in lmLogOpen()
1132 log->bdev = bdev; in lmLogOpen()
1166 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); in lmLogOpen()
1187 log->bdev = sb->s_bdev; in open_inline_log()
1463 struct block_device *bdev; in lmLogClose() local
1509 bdev = log->bdev; in lmLogClose()
[all …]
/fs/jbd2/
revoke.c
329 struct block_device *bdev; in jbd2_journal_revoke() local
342 bdev = journal->j_fs_dev; in jbd2_journal_revoke()
346 bh = __find_get_block(bdev, blocknr, journal->j_blocksize); in jbd2_journal_revoke()
356 bh2 = __find_get_block(bdev, blocknr, journal->j_blocksize); in jbd2_journal_revoke()
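
jbd2_journal_revoke() checks whether a buffer for the revoked block is already cached using __find_get_block(), which only searches the page cache of bdev->bd_inode and never touches the disk. Sketch of that lookup, including the brelse() that balances the reference it hands back:

#include <linux/buffer_head.h>

/* Sketch: is a buffer for (bdev, block) already in the page cache? */
static bool example_block_is_cached(struct block_device *bdev, sector_t block,
				    unsigned int blocksize)
{
	struct buffer_head *bh = __find_get_block(bdev, block, blocksize);

	if (!bh)
		return false;	/* not cached */

	brelse(bh);		/* drop the reference __find_get_block() took */
	return true;
}
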
/fs/quota/
quota.c
732 struct block_device *bdev; in quotactl_block() local
738 bdev = lookup_bdev(tmp->name); in quotactl_block()
740 if (IS_ERR(bdev)) in quotactl_block()
741 return ERR_CAST(bdev); in quotactl_block()
743 sb = get_super_thawed(bdev); in quotactl_block()
745 sb = get_super(bdev); in quotactl_block()
746 bdput(bdev); in quotactl_block()
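
quotactl_block() resolves a user-supplied device path without opening the device: lookup_bdev() turns the path into a referenced block_device, get_super_thawed() (or plain get_super()) maps that to the mounted super_block, and bdput() drops the lookup reference. The same resolution, written out with the 4.x lookup_bdev() signature (helper name is illustrative):

#include <linux/fs.h>
#include <linux/err.h>

/* Sketch: path -> block_device -> mounted super_block, quotactl-style. */
static struct super_block *example_super_from_path(const char *path)
{
	struct block_device *bdev;
	struct super_block *sb;

	bdev = lookup_bdev(path);	/* takes a reference, does not open the device */
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);

	sb = get_super_thawed(bdev);	/* waits if the filesystem is frozen */
	bdput(bdev);			/* release the lookup reference */

	return sb;			/* NULL if nothing is mounted on it */
}
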
/fs/xfs/
xfs_discard.c
44 struct block_device *bdev = mp->m_ddev_targp->bt_bdev; in xfs_trim_extents() local
126 error = blkdev_issue_discard(bdev, dbno, dlen, GFP_NOFS, 0); in xfs_trim_extents()
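
xfs_trim_extents() bottoms out in blkdev_issue_discard(), the block-layer entry point behind FITRIM; it takes the target device plus a start and length expressed in 512-byte sectors. A sketch that discards a byte-addressed range (conversion included; the helper is illustrative):

#include <linux/blkdev.h>

/* Sketch: discard a byte range on bdev, as an FITRIM handler ultimately does. */
static int example_discard_range(struct block_device *bdev, u64 start, u64 len)
{
	sector_t sector = start >> 9;		/* bytes -> 512-byte sectors */
	sector_t nr_sects = len >> 9;

	return blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS, 0);
}
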
