// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1991, 1992 Linus Torvalds
 * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2016 - 2020 Christoph Hellwig
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/device_cgroup.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/buffer_head.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/cleancache.h>
#include <linux/part_stat.h>
#include <linux/uaccess.h>
#include "../fs/internal.h"
#include "blk.h"

struct bdev_inode {
	struct block_device bdev;
	struct inode vfs_inode;
};

static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
	return container_of(inode, struct bdev_inode, vfs_inode);
}

struct block_device *I_BDEV(struct inode *inode)
{
	return &BDEV_I(inode)->bdev;
}
EXPORT_SYMBOL(I_BDEV);

static void bdev_write_inode(struct block_device *bdev)
{
	struct inode *inode = bdev->bd_inode;
	int ret;

	spin_lock(&inode->i_lock);
	while (inode->i_state & I_DIRTY) {
		spin_unlock(&inode->i_lock);
		ret = write_inode_now(inode, true);
		if (ret) {
			char name[BDEVNAME_SIZE];
			pr_warn_ratelimited("VFS: Dirty inode writeback failed "
					    "for block device %s (err=%d).\n",
					    bdevname(bdev, name), ret);
		}
		spin_lock(&inode->i_lock);
	}
	spin_unlock(&inode->i_lock);
}
/* Kill _all_ buffers and pagecache, dirty or not.. */
static void kill_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping_empty(mapping))
		return;

	invalidate_bh_lrus();
	truncate_inode_pages(mapping, 0);
}

/* Invalidate clean unused buffers and pagecache. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages) {
		invalidate_bh_lrus();
		lru_add_drain_all();	/* make sure all lru add caches are flushed */
		invalidate_mapping_pages(mapping, 0, -1);
	}
	/* 99% of the time, we don't need to flush the cleancache on the bdev.
	 * But, for the strange corners, let's be cautious
	 */
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(invalidate_bdev);

/*
 * Drop all buffers & page cache for the given bdev range. This function
 * bails out with an error if the bdev has another exclusive owner (such as
 * a filesystem).
 */
int truncate_bdev_range(struct block_device *bdev, fmode_t mode,
			loff_t lstart, loff_t lend)
{
	/*
	 * If we don't hold an exclusive handle for the device, upgrade to it
	 * while we discard the buffer cache to avoid discarding buffers
	 * under a live filesystem.
	 */
	if (!(mode & FMODE_EXCL)) {
		int err = bd_prepare_to_claim(bdev, truncate_bdev_range);
		if (err)
			goto invalidate;
	}

	truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend);
	if (!(mode & FMODE_EXCL))
		bd_abort_claiming(bdev, truncate_bdev_range);
	return 0;

invalidate:
	/*
	 * Someone else has the handle exclusively open. Try invalidating
	 * instead. The 'end' argument is inclusive so the rounding is safe.
	 */
	return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping,
					     lstart >> PAGE_SHIFT,
					     lend >> PAGE_SHIFT);
}

static void set_init_blocksize(struct block_device *bdev)
{
	unsigned int bsize = bdev_logical_block_size(bdev);
	loff_t size = i_size_read(bdev->bd_inode);

	while (bsize < PAGE_SIZE) {
		if (size & bsize)
			break;
		bsize <<= 1;
	}
	bdev->bd_inode->i_blkbits = blksize_bits(bsize);
}

int set_blocksize(struct block_device *bdev, int size)
{
	/* Size must be a power of two, and between 512 and PAGE_SIZE */
	if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
		return -EINVAL;

	/* Size cannot be smaller than the size supported by the device */
	if (size < bdev_logical_block_size(bdev))
		return -EINVAL;

	/* Don't change the size if it is same as current */
	if (bdev->bd_inode->i_blkbits != blksize_bits(size)) {
		sync_blockdev(bdev);
		bdev->bd_inode->i_blkbits = blksize_bits(size);
		kill_bdev(bdev);
	}
	return 0;
}

EXPORT_SYMBOL(set_blocksize);

int sb_set_blocksize(struct super_block *sb, int size)
{
	if (set_blocksize(sb->s_bdev, size))
		return 0;
	/* If we get here, we know size is power of two
	 * and its value is between 512 and PAGE_SIZE */
	sb->s_blocksize = size;
	sb->s_blocksize_bits = blksize_bits(size);
	return sb->s_blocksize;
}

EXPORT_SYMBOL(sb_set_blocksize);

int sb_min_blocksize(struct super_block *sb, int size)
{
	int minsize = bdev_logical_block_size(sb->s_bdev);
	if (size < minsize)
		size = minsize;
	return sb_set_blocksize(sb, size);
}

EXPORT_SYMBOL(sb_min_blocksize);
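
/*
 * Illustrative sketch, not taken from this file: a filesystem's fill_super
 * callback would typically pick its block size with the helpers above.
 * "sb" and EXAMPLE_BLOCK_SIZE are hypothetical names; sb_min_blocksize()
 * returns 0 when the requested size cannot be used.
 *
 *	if (!sb_min_blocksize(sb, EXAMPLE_BLOCK_SIZE))
 *		return -EINVAL;
 *
 * after which sb->s_blocksize and sb->s_blocksize_bits describe the size
 * actually chosen.
 */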

int sync_blockdev_nowait(struct block_device *bdev)
{
	if (!bdev)
		return 0;
	return filemap_flush(bdev->bd_inode->i_mapping);
}
EXPORT_SYMBOL_GPL(sync_blockdev_nowait);

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping. Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
	if (!bdev)
		return 0;
	return filemap_write_and_wait(bdev->bd_inode->i_mapping);
}
EXPORT_SYMBOL(sync_blockdev);

/*
 * Write out and wait upon all dirty data associated with this
 * device. Filesystem data as well as the underlying block
 * device. Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
	struct super_block *sb = get_super(bdev);
	if (sb) {
		int res = sync_filesystem(sb);
		drop_super(sb);
		return res;
	}
	return sync_blockdev(bdev);
}
EXPORT_SYMBOL(fsync_bdev);
/**
 * freeze_bdev -- lock a filesystem and force it into a consistent state
 * @bdev: blockdevice to lock
 *
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 * The reference counter (bd_fsfreeze_count) guarantees that, when multiple
 * freeze requests arrive simultaneously, only the last unfreeze process can
 * actually unfreeze the frozen filesystem. It counts up in freeze_bdev() and
 * counts down in thaw_bdev(). When it becomes 0, thaw_bdev() actually
 * unfreezes.
 */
int freeze_bdev(struct block_device *bdev)
{
	struct super_block *sb;
	int error = 0;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (++bdev->bd_fsfreeze_count > 1)
		goto done;

	sb = get_active_super(bdev);
	if (!sb)
		goto sync;
	if (sb->s_op->freeze_super)
		error = sb->s_op->freeze_super(sb);
	else
		error = freeze_super(sb);
	deactivate_super(sb);

	if (error) {
		bdev->bd_fsfreeze_count--;
		goto done;
	}
	bdev->bd_fsfreeze_sb = sb;

sync:
	sync_blockdev(bdev);
done:
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return error;
}
EXPORT_SYMBOL(freeze_bdev);

/**
 * thaw_bdev -- unlock filesystem
 * @bdev: blockdevice to unlock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
int thaw_bdev(struct block_device *bdev)
{
	struct super_block *sb;
	int error = -EINVAL;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (!bdev->bd_fsfreeze_count)
		goto out;

	error = 0;
	if (--bdev->bd_fsfreeze_count > 0)
		goto out;

	sb = bdev->bd_fsfreeze_sb;
	if (!sb)
		goto out;

	if (sb->s_op->thaw_super)
		error = sb->s_op->thaw_super(sb);
	else
		error = thaw_super(sb);
	if (error)
		bdev->bd_fsfreeze_count++;
	else
		bdev->bd_fsfreeze_sb = NULL;
out:
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return error;
}
EXPORT_SYMBOL(thaw_bdev);
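
/*
 * Illustrative sketch, not taken from this file: freeze_bdev() and
 * thaw_bdev() are meant to be used in pairs around work that needs a
 * quiesced, consistent device, e.g. taking a snapshot. "bdev" and the
 * snapshot step are hypothetical.
 *
 *	error = freeze_bdev(bdev);
 *	if (error)
 *		return error;
 *	... take the snapshot while writes are held off ...
 *	thaw_bdev(bdev);
 */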

/**
 * bdev_read_page() - Start reading a page from a block device
 * @bdev: The device to read the page from
 * @sector: The offset on the device to start reading from (need not be aligned)
 * @page: The page to read
 *
 * On entry, the page should be locked. It will be unlocked when the page
 * has been read. If the block driver implements rw_page synchronously,
 * that will be true on exit from this function, but it need not be.
 *
 * Errors returned by this function are usually "soft", eg out of memory, or
 * queue full; callers should try a different route to read this page rather
 * than propagate an error back up the stack.
 *
 * Return: negative errno if an error occurs, 0 if submission was successful.
 */
int bdev_read_page(struct block_device *bdev, sector_t sector,
		   struct page *page)
{
	const struct block_device_operations *ops = bdev->bd_disk->fops;
	int result = -EOPNOTSUPP;

	if (!ops->rw_page || bdev_get_integrity(bdev))
		return result;

	result = blk_queue_enter(bdev->bd_disk->queue, 0);
	if (result)
		return result;
	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
			      REQ_OP_READ);
	blk_queue_exit(bdev->bd_disk->queue);
	return result;
}

/**
 * bdev_write_page() - Start writing a page to a block device
 * @bdev: The device to write the page to
 * @sector: The offset on the device to write the page to (need not be aligned)
 * @page: The page to write
 * @wbc: The writeback_control for the write
 *
 * On entry, the page should be locked and not currently under writeback.
 * On exit, if the write started successfully, the page will be unlocked and
 * under writeback. If the write failed already (eg the driver failed to
 * queue the page to the device), the page will still be locked. If the
 * caller is a ->writepage implementation, it will need to unlock the page.
 *
 * Errors returned by this function are usually "soft", eg out of memory, or
 * queue full; callers should try a different route to write this page rather
 * than propagate an error back up the stack.
 *
 * Return: negative errno if an error occurs, 0 if submission was successful.
 */
int bdev_write_page(struct block_device *bdev, sector_t sector,
		    struct page *page, struct writeback_control *wbc)
{
	int result;
	const struct block_device_operations *ops = bdev->bd_disk->fops;

	if (!ops->rw_page || bdev_get_integrity(bdev))
		return -EOPNOTSUPP;
	result = blk_queue_enter(bdev->bd_disk->queue, 0);
	if (result)
		return result;

	set_page_writeback(page);
	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
			      REQ_OP_WRITE);
	if (result) {
		end_page_writeback(page);
	} else {
		clean_page_buffers(page);
		unlock_page(page);
	}
	blk_queue_exit(bdev->bd_disk->queue);
	return result;
}

/*
 * pseudo-fs
 */

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
static struct kmem_cache *bdev_cachep __read_mostly;

static struct inode *bdev_alloc_inode(struct super_block *sb)
{
	struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);

	if (!ei)
		return NULL;
	memset(&ei->bdev, 0, sizeof(ei->bdev));
	return &ei->vfs_inode;
}

static void bdev_free_inode(struct inode *inode)
{
	struct block_device *bdev = I_BDEV(inode);

	free_percpu(bdev->bd_stats);
	kfree(bdev->bd_meta_info);

	if (!bdev_is_partition(bdev)) {
		if (bdev->bd_disk && bdev->bd_disk->bdi)
			bdi_put(bdev->bd_disk->bdi);
		kfree(bdev->bd_disk);
	}

	if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR)
		blk_free_ext_minor(MINOR(bdev->bd_dev));

	kmem_cache_free(bdev_cachep, BDEV_I(inode));
}

static void init_once(void *data)
{
	struct bdev_inode *ei = data;

	inode_init_once(&ei->vfs_inode);
}

static void bdev_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	invalidate_inode_buffers(inode); /* is it needed here? */
	clear_inode(inode);
}

static const struct super_operations bdev_sops = {
	.statfs = simple_statfs,
	.alloc_inode = bdev_alloc_inode,
	.free_inode = bdev_free_inode,
	.drop_inode = generic_delete_inode,
	.evict_inode = bdev_evict_inode,
};

static int bd_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, BDEVFS_MAGIC);
	if (!ctx)
		return -ENOMEM;
	fc->s_iflags |= SB_I_CGROUPWB;
	ctx->ops = &bdev_sops;
	return 0;
}

static struct file_system_type bd_type = {
	.name = "bdev",
	.init_fs_context = bd_init_fs_context,
	.kill_sb = kill_anon_super,
};

struct super_block *blockdev_superblock __read_mostly;
EXPORT_SYMBOL_GPL(blockdev_superblock);

void __init bdev_cache_init(void)
{
	int err;
	static struct vfsmount *bd_mnt;

	bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
			0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_MEM_SPREAD|SLAB_ACCOUNT|SLAB_PANIC),
			init_once);
	err = register_filesystem(&bd_type);
	if (err)
		panic("Cannot register bdev pseudo-fs");
	bd_mnt = kern_mount(&bd_type);
	if (IS_ERR(bd_mnt))
		panic("Cannot create bdev pseudo-fs");
	blockdev_superblock = bd_mnt->mnt_sb;	/* For writeback */
}

struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
{
	struct block_device *bdev;
	struct inode *inode;

	inode = new_inode(blockdev_superblock);
	if (!inode)
		return NULL;
	inode->i_mode = S_IFBLK;
	inode->i_rdev = 0;
	inode->i_data.a_ops = &def_blk_aops;
	mapping_set_gfp_mask(&inode->i_data, GFP_USER);

	bdev = I_BDEV(inode);
	mutex_init(&bdev->bd_fsfreeze_mutex);
	spin_lock_init(&bdev->bd_size_lock);
	bdev->bd_partno = partno;
	bdev->bd_inode = inode;
	bdev->bd_stats = alloc_percpu(struct disk_stats);
	if (!bdev->bd_stats) {
		iput(inode);
		return NULL;
	}
	bdev->bd_disk = disk;
	return bdev;
}

void bdev_add(struct block_device *bdev, dev_t dev)
{
	bdev->bd_dev = dev;
	bdev->bd_inode->i_rdev = dev;
	bdev->bd_inode->i_ino = dev;
	insert_inode_hash(bdev->bd_inode);
}

long nr_blockdev_pages(void)
{
	struct inode *inode;
	long ret = 0;

	spin_lock(&blockdev_superblock->s_inode_list_lock);
	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list)
		ret += inode->i_mapping->nrpages;
	spin_unlock(&blockdev_superblock->s_inode_list_lock);

	return ret;
}

/**
 * bd_may_claim - test whether a block device can be claimed
 * @bdev: block device of interest
 * @whole: whole block device containing @bdev, may equal @bdev
 * @holder: holder trying to claim @bdev
 *
 * Test whether @bdev can be claimed by @holder.
 *
 * CONTEXT:
 * spin_lock(&bdev_lock).
 *
 * RETURNS:
 * %true if @bdev can be claimed, %false otherwise.
 */
static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
			 void *holder)
{
	if (bdev->bd_holder == holder)
		return true;	/* already a holder */
	else if (bdev->bd_holder != NULL)
		return false;	/* held by someone else */
	else if (whole == bdev)
		return true;	/* is a whole device which isn't held */

	else if (whole->bd_holder == bd_may_claim)
		return true;	/* is a partition of a device that is being partitioned */
	else if (whole->bd_holder != NULL)
		return false;	/* is a partition of a held device */
	else
		return true;	/* is a partition of an un-held device */
}

/**
 * bd_prepare_to_claim - claim a block device
 * @bdev: block device of interest
 * @holder: holder trying to claim @bdev
 *
 * Claim @bdev. This function fails if @bdev is already claimed by another
 * holder and waits if another claiming is in progress. On successful return,
 * the caller has ownership of bd_claiming and bd_holder[s].
 *
 * RETURNS:
 * 0 if @bdev can be claimed, -EBUSY otherwise.
 */
int bd_prepare_to_claim(struct block_device *bdev, void *holder)
{
	struct block_device *whole = bdev_whole(bdev);

	if (WARN_ON_ONCE(!holder))
		return -EINVAL;
retry:
	spin_lock(&bdev_lock);
	/* if someone else claimed, fail */
	if (!bd_may_claim(bdev, whole, holder)) {
		spin_unlock(&bdev_lock);
		return -EBUSY;
	}

	/* if claiming is already in progress, wait for it to finish */
	if (whole->bd_claiming) {
		wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
		DEFINE_WAIT(wait);

		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&bdev_lock);
		schedule();
		finish_wait(wq, &wait);
		goto retry;
	}

	/* yay, all mine */
	whole->bd_claiming = holder;
	spin_unlock(&bdev_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(bd_prepare_to_claim); /* only for the loop driver */

static void bd_clear_claiming(struct block_device *whole, void *holder)
{
	lockdep_assert_held(&bdev_lock);
	/* tell others that we're done */
	BUG_ON(whole->bd_claiming != holder);
	whole->bd_claiming = NULL;
	wake_up_bit(&whole->bd_claiming, 0);
}

/**
 * bd_finish_claiming - finish claiming of a block device
 * @bdev: block device of interest
 * @holder: holder that has claimed @bdev
 *
 * Finish exclusive open of a block device. Mark the device as exclusively
 * open by the holder and wake up all waiters for exclusive open to finish.
 */
static void bd_finish_claiming(struct block_device *bdev, void *holder)
{
	struct block_device *whole = bdev_whole(bdev);

	spin_lock(&bdev_lock);
	BUG_ON(!bd_may_claim(bdev, whole, holder));
	/*
	 * Note that for a whole device bd_holders will be incremented twice,
	 * and bd_holder will be set to bd_may_claim before being set to holder
	 */
	whole->bd_holders++;
	whole->bd_holder = bd_may_claim;
	bdev->bd_holders++;
	bdev->bd_holder = holder;
	bd_clear_claiming(whole, holder);
	spin_unlock(&bdev_lock);
}

/**
 * bd_abort_claiming - abort claiming of a block device
 * @bdev: block device of interest
 * @holder: holder that has claimed @bdev
 *
 * Abort claiming of a block device when the exclusive open failed. This can
 * also be used when exclusive open is not actually desired and we just needed
 * to block other exclusive openers for a while.
 */
void bd_abort_claiming(struct block_device *bdev, void *holder)
{
	spin_lock(&bdev_lock);
	bd_clear_claiming(bdev_whole(bdev), holder);
	spin_unlock(&bdev_lock);
}
EXPORT_SYMBOL(bd_abort_claiming);

static void blkdev_flush_mapping(struct block_device *bdev)
{
	WARN_ON_ONCE(bdev->bd_holders);
	sync_blockdev(bdev);
	kill_bdev(bdev);
	bdev_write_inode(bdev);
}

static int blkdev_get_whole(struct block_device *bdev, fmode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;
	int ret = 0;

	if (disk->fops->open) {
		ret = disk->fops->open(bdev, mode);
		if (ret) {
			/* avoid ghost partitions on a removed medium */
			if (ret == -ENOMEDIUM &&
			    test_bit(GD_NEED_PART_SCAN, &disk->state))
				bdev_disk_changed(disk, true);
			return ret;
		}
	}

	if (!bdev->bd_openers)
		set_init_blocksize(bdev);
	if (test_bit(GD_NEED_PART_SCAN, &disk->state))
		bdev_disk_changed(disk, false);
	bdev->bd_openers++;
	return 0;
}

static void blkdev_put_whole(struct block_device *bdev, fmode_t mode)
{
	if (!--bdev->bd_openers)
		blkdev_flush_mapping(bdev);
	if (bdev->bd_disk->fops->release)
		bdev->bd_disk->fops->release(bdev->bd_disk, mode);
}

static int blkdev_get_part(struct block_device *part, fmode_t mode)
{
	struct gendisk *disk = part->bd_disk;
	int ret;

	if (part->bd_openers)
		goto done;

	ret = blkdev_get_whole(bdev_whole(part), mode);
	if (ret)
		return ret;

	ret = -ENXIO;
	if (!bdev_nr_sectors(part))
		goto out_blkdev_put;

	disk->open_partitions++;
	set_init_blocksize(part);
done:
	part->bd_openers++;
	return 0;

out_blkdev_put:
	blkdev_put_whole(bdev_whole(part), mode);
	return ret;
}

static void blkdev_put_part(struct block_device *part, fmode_t mode)
{
	struct block_device *whole = bdev_whole(part);

	if (--part->bd_openers)
		return;
	blkdev_flush_mapping(part);
	whole->bd_disk->open_partitions--;
	blkdev_put_whole(whole, mode);
}

struct block_device *blkdev_get_no_open(dev_t dev)
{
	struct block_device *bdev;
	struct inode *inode;

	inode = ilookup(blockdev_superblock, dev);
	if (!inode) {
		blk_request_module(dev);
		inode = ilookup(blockdev_superblock, dev);
		if (!inode)
			return NULL;
	}

	/* switch from the inode reference to a device model reference: */
	bdev = &BDEV_I(inode)->bdev;
	if (!kobject_get_unless_zero(&bdev->bd_device.kobj))
		bdev = NULL;
	iput(inode);

	if (!bdev)
		return NULL;
	if ((bdev->bd_disk->flags & GENHD_FL_HIDDEN) ||
	    !try_module_get(bdev->bd_disk->fops->owner)) {
		put_device(&bdev->bd_device);
		return NULL;
	}

	return bdev;
}

void blkdev_put_no_open(struct block_device *bdev)
{
	module_put(bdev->bd_disk->fops->owner);
	put_device(&bdev->bd_device);
}

/**
 * blkdev_get_by_dev - open a block device by device number
 * @dev: device number of block device to open
 * @mode: FMODE_* mask
 * @holder: exclusive holder identifier
 *
 * Open the block device described by device number @dev. If @mode includes
 * %FMODE_EXCL, the block device is opened with exclusive access. Specifying
 * %FMODE_EXCL with a %NULL @holder is invalid. Exclusive opens may nest for
 * the same @holder.
 *
 * Use this interface ONLY if you really do not have anything better - i.e. when
 * you are behind a truly sucky interface and all you are given is a device
 * number. Everything else should use blkdev_get_by_path().
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Reference to the block_device on success, ERR_PTR(-errno) on failure.
 */
struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
{
	bool unblock_events = true;
	struct block_device *bdev;
	struct gendisk *disk;
	int ret;

	ret = devcgroup_check_permission(DEVCG_DEV_BLOCK,
			MAJOR(dev), MINOR(dev),
			((mode & FMODE_READ) ? DEVCG_ACC_READ : 0) |
			((mode & FMODE_WRITE) ? DEVCG_ACC_WRITE : 0));
	if (ret)
		return ERR_PTR(ret);

	bdev = blkdev_get_no_open(dev);
	if (!bdev)
		return ERR_PTR(-ENXIO);
	disk = bdev->bd_disk;

	if (mode & FMODE_EXCL) {
		ret = bd_prepare_to_claim(bdev, holder);
		if (ret)
			goto put_blkdev;
	}

	disk_block_events(disk);

	mutex_lock(&disk->open_mutex);
	ret = -ENXIO;
	if (!disk_live(disk))
		goto abort_claiming;
	if (bdev_is_partition(bdev))
		ret = blkdev_get_part(bdev, mode);
	else
		ret = blkdev_get_whole(bdev, mode);
	if (ret)
		goto abort_claiming;
	if (mode & FMODE_EXCL) {
		bd_finish_claiming(bdev, holder);

		/*
		 * Block event polling for write claims if requested. Any write
		 * holder makes the write_holder state stick until all are
		 * released. This is good enough and tracking individual
		 * writeable reference is too fragile given the way @mode is
		 * used in blkdev_get/put().
		 */
		if ((mode & FMODE_WRITE) && !bdev->bd_write_holder &&
		    (disk->event_flags & DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE)) {
			bdev->bd_write_holder = true;
			unblock_events = false;
		}
	}
	mutex_unlock(&disk->open_mutex);

	if (unblock_events)
		disk_unblock_events(disk);
	return bdev;

abort_claiming:
	if (mode & FMODE_EXCL)
		bd_abort_claiming(bdev, holder);
	mutex_unlock(&disk->open_mutex);
	disk_unblock_events(disk);
put_blkdev:
	blkdev_put_no_open(bdev);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(blkdev_get_by_dev);
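
/*
 * Illustrative sketch, not taken from this file: an exclusive open by
 * device number and the matching put. The holder cookie must be non-NULL
 * for FMODE_EXCL and identifies the claim; "my_ctx" is hypothetical.
 *
 *	struct block_device *bdev;
 *
 *	bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_WRITE | FMODE_EXCL,
 *				 my_ctx);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 *	... use the device ...
 *	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
 */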

/**
 * blkdev_get_by_path - open a block device by name
 * @path: path to the block device to open
 * @mode: FMODE_* mask
 * @holder: exclusive holder identifier
 *
 * Open the block device described by the device file at @path. If @mode
 * includes %FMODE_EXCL, the block device is opened with exclusive access.
 * Specifying %FMODE_EXCL with a %NULL @holder is invalid. Exclusive opens may
 * nest for the same @holder.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Reference to the block_device on success, ERR_PTR(-errno) on failure.
 */
struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
					void *holder)
{
	struct block_device *bdev;
	dev_t dev;
	int error;

	error = lookup_bdev(path, &dev);
	if (error)
		return ERR_PTR(error);

	bdev = blkdev_get_by_dev(dev, mode, holder);
	if (!IS_ERR(bdev) && (mode & FMODE_WRITE) && bdev_read_only(bdev)) {
		blkdev_put(bdev, mode);
		return ERR_PTR(-EACCES);
	}

	return bdev;
}
EXPORT_SYMBOL(blkdev_get_by_path);
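
/*
 * Illustrative sketch, not taken from this file: opening by path is the
 * preferred interface when a device name is available, e.g. from mount
 * options. "/dev/example" and "my_ctx" are hypothetical.
 *
 *	bdev = blkdev_get_by_path("/dev/example",
 *				  FMODE_READ | FMODE_EXCL, my_ctx);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 */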

void blkdev_put(struct block_device *bdev, fmode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;

	/*
	 * Sync early if it looks like we're the last one. If someone else
	 * opens the block device between now and the decrement of bd_openers
	 * then we did a sync that we didn't need to, but that's not the end
	 * of the world and we want to avoid long (could be several minute)
	 * syncs while holding the mutex.
	 */
	if (bdev->bd_openers == 1)
		sync_blockdev(bdev);

	mutex_lock(&disk->open_mutex);
	if (mode & FMODE_EXCL) {
		struct block_device *whole = bdev_whole(bdev);
		bool bdev_free;

		/*
		 * Release a claim on the device. The holder fields
		 * are protected with bdev_lock. open_mutex is to
		 * synchronize disk_holder unlinking.
		 */
		spin_lock(&bdev_lock);

		WARN_ON_ONCE(--bdev->bd_holders < 0);
		WARN_ON_ONCE(--whole->bd_holders < 0);

		if ((bdev_free = !bdev->bd_holders))
			bdev->bd_holder = NULL;
		if (!whole->bd_holders)
			whole->bd_holder = NULL;

		spin_unlock(&bdev_lock);

		/*
		 * If this was the last claim, remove holder link and
		 * unblock evpoll if it was a write holder.
		 */
		if (bdev_free && bdev->bd_write_holder) {
			disk_unblock_events(disk);
			bdev->bd_write_holder = false;
		}
	}

	/*
	 * Trigger event checking and tell drivers to flush MEDIA_CHANGE
	 * event. This is to ensure detection of media removal commanded
	 * from userland - e.g. eject(1).
	 */
	disk_flush_events(disk, DISK_EVENT_MEDIA_CHANGE);

	if (bdev_is_partition(bdev))
		blkdev_put_part(bdev, mode);
	else
		blkdev_put_whole(bdev, mode);
	mutex_unlock(&disk->open_mutex);

	blkdev_put_no_open(bdev);
}
EXPORT_SYMBOL(blkdev_put);

/**
 * lookup_bdev - look up a block device's dev_t by name
 * @pathname: special file representing the block device
 * @dev: return location for the block device's dev_t
 *
 * Look up the block device at @pathname in the current namespace and, on
 * success, store its device number in @dev.
 *
 * RETURNS:
 * 0 on success, negative errno otherwise.
 */
int lookup_bdev(const char *pathname, dev_t *dev)
{
	struct inode *inode;
	struct path path;
	int error;

	if (!pathname || !*pathname)
		return -EINVAL;

	error = kern_path(pathname, LOOKUP_FOLLOW, &path);
	if (error)
		return error;

	inode = d_backing_inode(path.dentry);
	error = -ENOTBLK;
	if (!S_ISBLK(inode->i_mode))
		goto out_path_put;
	error = -EACCES;
	if (!may_open_dev(&path))
		goto out_path_put;

	*dev = inode->i_rdev;
	error = 0;
out_path_put:
	path_put(&path);
	return error;
}
EXPORT_SYMBOL(lookup_bdev);
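
/*
 * Illustrative sketch, not taken from this file: lookup_bdev() only
 * resolves a path to a dev_t and takes no reference on the device; a
 * caller that needs the device open follows up with blkdev_get_by_dev().
 * "/dev/example" is hypothetical.
 *
 *	dev_t dev;
 *	int err = lookup_bdev("/dev/example", &dev);
 *	if (err)
 *		return err;
 */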

int __invalidate_device(struct block_device *bdev, bool kill_dirty)
{
	struct super_block *sb = get_super(bdev);
	int res = 0;

	if (sb) {
		/*
		 * no need to lock the super, get_super holds the
		 * read mutex so the filesystem cannot go away
		 * under us (->put_super runs with the write lock
		 * held).
		 */
		shrink_dcache_sb(sb);
		res = invalidate_inodes(sb, kill_dirty);
		drop_super(sb);
	}
	invalidate_bdev(bdev);
	return res;
}
EXPORT_SYMBOL(__invalidate_device);

void sync_bdevs(bool wait)
{
	struct inode *inode, *old_inode = NULL;

	spin_lock(&blockdev_superblock->s_inode_list_lock);
	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
		struct address_space *mapping = inode->i_mapping;
		struct block_device *bdev;

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
		    mapping->nrpages == 0) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&blockdev_superblock->s_inode_list_lock);
		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * s_inode_list_lock. We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it under
		 * s_inode_list_lock. So we keep the reference and iput it
		 * later.
		 */
		iput(old_inode);
		old_inode = inode;
		bdev = I_BDEV(inode);

		mutex_lock(&bdev->bd_disk->open_mutex);
		if (!bdev->bd_openers) {
			; /* skip */
		} else if (wait) {
			/*
			 * We keep the error status of individual mapping so
			 * that applications can catch the writeback error using
			 * fsync(2). See filemap_fdatawait_keep_errors() for
			 * details.
			 */
			filemap_fdatawait_keep_errors(inode->i_mapping);
		} else {
			filemap_fdatawrite(inode->i_mapping);
		}
		mutex_unlock(&bdev->bd_disk->open_mutex);

		spin_lock(&blockdev_superblock->s_inode_list_lock);
	}
	spin_unlock(&blockdev_superblock->s_inode_list_lock);
	iput(old_inode);
}