• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/capability.h>
3 #include <linux/compat.h>
4 #include <linux/blkdev.h>
5 #include <linux/export.h>
6 #include <linux/gfp.h>
7 #include <linux/blkpg.h>
8 #include <linux/hdreg.h>
9 #include <linux/backing-dev.h>
10 #include <linux/fs.h>
11 #include <linux/blktrace_api.h>
12 #include <linux/pr.h>
13 #include <linux/uaccess.h>
14 #include <linux/pagemap.h>
15 #include <linux/io_uring/cmd.h>
16 #include <uapi/linux/blkdev.h>
17 #include "blk.h"
18 
/*
 * Handle one BLKPG sub-operation (add/resize/delete a partition).
 *
 * Requires CAP_SYS_ADMIN and must be issued against the whole disk, not a
 * partition.  Byte offsets from user space are validated for sign, overflow
 * and logical-block alignment before being converted to sectors.
 */
static int blkpg_do_ioctl(struct block_device *bdev,
			  struct blkpg_partition __user *upart, int op)
{
	struct gendisk *disk = bdev->bd_disk;
	struct blkpg_partition p;
	sector_t start, length, capacity, end;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&p, upart, sizeof(struct blkpg_partition)))
		return -EFAULT;
	if (bdev_is_partition(bdev))
		return -EINVAL;

	if (p.pno <= 0)
		return -EINVAL;

	/* deletion needs no range validation, only a partition number */
	if (op == BLKPG_DEL_PARTITION)
		return bdev_del_partition(disk, p.pno);

	if (p.start < 0 || p.length <= 0 || LLONG_MAX - p.length < p.start)
		return -EINVAL;
	/* Check that the partition is aligned to the block size */
	if (!IS_ALIGNED(p.start | p.length, bdev_logical_block_size(bdev)))
		return -EINVAL;

	start = p.start >> SECTOR_SHIFT;
	length = p.length >> SECTOR_SHIFT;
	capacity = get_capacity(disk);

	if (check_add_overflow(start, length, &end))
		return -EINVAL;

	if (start >= capacity || end > capacity)
		return -EINVAL;

	switch (op) {
	case BLKPG_ADD_PARTITION:
		return bdev_add_partition(disk, p.pno, start, length);
	case BLKPG_RESIZE_PARTITION:
		return bdev_resize_partition(disk, p.pno, start, length);
	default:
		return -EINVAL;
	}
}
64 
/*
 * Native BLKPG entry point: unpack the op code and the user pointer to the
 * blkpg_partition payload, then hand off to blkpg_do_ioctl().
 */
static int blkpg_ioctl(struct block_device *bdev,
		       struct blkpg_ioctl_arg __user *arg)
{
	struct blkpg_partition __user *udata;
	int op;

	if (get_user(op, &arg->op) || get_user(udata, &arg->data))
		return -EFAULT;

	return blkpg_do_ioctl(bdev, udata, op);
}
76 
#ifdef CONFIG_COMPAT
/* 32-bit layout of struct blkpg_ioctl_arg (pointer field is 32 bits wide). */
struct compat_blkpg_ioctl_arg {
	compat_int_t op;
	compat_int_t flags;
	compat_int_t datalen;
	compat_caddr_t data;
};

/*
 * Compat BLKPG entry point: same as blkpg_ioctl() but the data pointer must
 * be widened with compat_ptr() before use.
 */
static int compat_blkpg_ioctl(struct block_device *bdev,
			      struct compat_blkpg_ioctl_arg __user *arg)
{
	compat_caddr_t udata;
	int op;

	if (get_user(op, &arg->op) || get_user(udata, &arg->data))
		return -EFAULT;

	return blkpg_do_ioctl(bdev, compat_ptr(udata), op);
}
#endif
97 
98 /*
99  * Check that [start, start + len) is a valid range from the block device's
100  * perspective, including verifying that it can be correctly translated into
101  * logical block addresses.
102  */
blk_validate_byte_range(struct block_device * bdev,uint64_t start,uint64_t len)103 static int blk_validate_byte_range(struct block_device *bdev,
104 				   uint64_t start, uint64_t len)
105 {
106 	unsigned int bs_mask = bdev_logical_block_size(bdev) - 1;
107 	uint64_t end;
108 
109 	if ((start | len) & bs_mask)
110 		return -EINVAL;
111 	if (!len)
112 		return -EINVAL;
113 	if (check_add_overflow(start, len, &end) || end > bdev_nr_bytes(bdev))
114 		return -EINVAL;
115 
116 	return 0;
117 }
118 
/*
 * BLKDISCARD: discard a byte range described by a {start, len} pair copied
 * from user space.  The page cache over the range is truncated first, then
 * discard bios are built and chained under a plug; a fatal signal aborts the
 * submission after waiting out the bios already in flight.
 */
static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode,
		unsigned long arg)
{
	uint64_t range[2], start, len;
	struct bio *prev = NULL, *bio;
	sector_t sector, nr_sects;
	struct blk_plug plug;
	int err;

	if (copy_from_user(range, (void __user *)arg, sizeof(range)))
		return -EFAULT;
	start = range[0];
	len = range[1];

	if (!bdev_max_discard_sectors(bdev))
		return -EOPNOTSUPP;

	if (!(mode & BLK_OPEN_WRITE))
		return -EBADF;
	if (bdev_read_only(bdev))
		return -EPERM;
	err = blk_validate_byte_range(bdev, start, len);
	if (err)
		return err;

	/* keep page cache invalidation and discard atomic vs. other writers */
	inode_lock(bdev->bd_mapping->host);
	filemap_invalidate_lock(bdev->bd_mapping);
	err = truncate_bdev_range(bdev, mode, start, start + len - 1);
	if (err)
		goto fail;

	sector = start >> SECTOR_SHIFT;
	nr_sects = len >> SECTOR_SHIFT;

	blk_start_plug(&plug);
	while (1) {
		if (fatal_signal_pending(current)) {
			if (prev)
				bio_await_chain(prev);
			err = -EINTR;
			goto out_unplug;
		}
		bio = blk_alloc_discard_bio(bdev, &sector, &nr_sects,
				GFP_KERNEL);
		if (!bio)
			break;
		prev = bio_chain_and_submit(prev, bio);
	}
	if (prev) {
		err = submit_bio_wait(prev);
		if (err == -EOPNOTSUPP)
			err = 0;
		bio_put(prev);
	}
out_unplug:
	blk_finish_plug(&plug);
fail:
	filemap_invalidate_unlock(bdev->bd_mapping);
	inode_unlock(bdev->bd_mapping->host);
	return err;
}
180 
/*
 * BLKSECDISCARD: securely erase a byte range.  Offsets only need 512-byte
 * alignment here (checked open-coded rather than via
 * blk_validate_byte_range()); the range must not overflow or run past the
 * device.
 */
static int blk_ioctl_secure_erase(struct block_device *bdev, blk_mode_t mode,
		void __user *argp)
{
	uint64_t start, len, end;
	uint64_t range[2];
	int err;

	if (!(mode & BLK_OPEN_WRITE))
		return -EBADF;
	if (!bdev_max_secure_erase_sectors(bdev))
		return -EOPNOTSUPP;
	if (copy_from_user(range, argp, sizeof(range)))
		return -EFAULT;

	start = range[0];
	len = range[1];
	if ((start & 511) || (len & 511))
		return -EINVAL;
	if (check_add_overflow(start, len, &end) ||
	    end > bdev_nr_bytes(bdev))
		return -EINVAL;

	/* drop cached pages over the range before erasing the media */
	inode_lock(bdev->bd_mapping->host);
	filemap_invalidate_lock(bdev->bd_mapping);
	err = truncate_bdev_range(bdev, mode, start, end - 1);
	if (!err)
		err = blkdev_issue_secure_erase(bdev, start >> 9, len >> 9,
						GFP_KERNEL);
	filemap_invalidate_unlock(bdev->bd_mapping);
	inode_unlock(bdev->bd_mapping->host);
	return err;
}
213 
214 
/*
 * BLKZEROOUT: write zeroes over a byte range without unmapping it
 * (BLKDEV_ZERO_NOUNMAP); the operation can be interrupted by a fatal
 * signal (BLKDEV_ZERO_KILLABLE).  Range checks are open-coded: 512-byte
 * alignment, no wraparound (end < start), and end within the device.
 */
static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode,
		unsigned long arg)
{
	uint64_t range[2];
	uint64_t start, end, len;
	int err;

	if (!(mode & BLK_OPEN_WRITE))
		return -EBADF;

	if (copy_from_user(range, (void __user *)arg, sizeof(range)))
		return -EFAULT;

	start = range[0];
	len = range[1];
	end = start + len - 1;

	if (start & 511)
		return -EINVAL;
	if (len & 511)
		return -EINVAL;
	if (end >= (uint64_t)bdev_nr_bytes(bdev))
		return -EINVAL;
	if (end < start)
		return -EINVAL;

	/* Invalidate the page cache, including dirty pages */
	inode_lock(bdev->bd_mapping->host);
	filemap_invalidate_lock(bdev->bd_mapping);
	err = truncate_bdev_range(bdev, mode, start, end);
	if (err)
		goto fail;

	err = blkdev_issue_zeroout(bdev, start >> 9, len >> 9, GFP_KERNEL,
				   BLKDEV_ZERO_NOUNMAP | BLKDEV_ZERO_KILLABLE);

fail:
	filemap_invalidate_unlock(bdev->bd_mapping);
	inode_unlock(bdev->bd_mapping->host);
	return err;
}
256 
/* Typed put_user() wrapper: return an unsigned short to user space. */
static int put_ushort(unsigned short __user *argp, unsigned short val)
{
	return put_user(val, argp);
}
261 
/* Typed put_user() wrapper: return an int to user space. */
static int put_int(int __user *argp, int val)
{
	return put_user(val, argp);
}
266 
/* Typed put_user() wrapper: return an unsigned int to user space. */
static int put_uint(unsigned int __user *argp, unsigned int val)
{
	return put_user(val, argp);
}
271 
/* Typed put_user() wrapper: return a long to user space. */
static int put_long(long __user *argp, long val)
{
	return put_user(val, argp);
}
276 
/* Typed put_user() wrapper: return an unsigned long to user space. */
static int put_ulong(unsigned long __user *argp, unsigned long val)
{
	return put_user(val, argp);
}
281 
/* Typed put_user() wrapper: return a u64 to user space. */
static int put_u64(u64 __user *argp, u64 val)
{
	return put_user(val, argp);
}
286 
#ifdef CONFIG_COMPAT
/* 32-bit compat counterparts of put_long()/put_ulong(). */
static int compat_put_long(compat_long_t __user *argp, long val)
{
	return put_user(val, argp);
}

static int compat_put_ulong(compat_ulong_t __user *argp, compat_ulong_t val)
{
	return put_user(val, argp);
}
#endif
298 
#ifdef CONFIG_COMPAT
/*
 * This is the equivalent of compat_ptr_ioctl(), to be used by block
 * drivers that implement only commands that are completely compatible
 * between 32-bit and 64-bit user space
 */
int blkdev_compat_ptr_ioctl(struct block_device *bdev, blk_mode_t mode,
			unsigned cmd, unsigned long arg)
{
	struct gendisk *disk = bdev->bd_disk;

	/* widen the 32-bit user pointer, then reuse the native handler */
	if (disk->fops->ioctl)
		return disk->fops->ioctl(bdev, mode, cmd,
					 (unsigned long)compat_ptr(arg));

	return -ENOIOCTLCMD;
}
EXPORT_SYMBOL(blkdev_compat_ptr_ioctl);
#endif
318 
blkdev_pr_allowed(struct block_device * bdev,blk_mode_t mode)319 static bool blkdev_pr_allowed(struct block_device *bdev, blk_mode_t mode)
320 {
321 	/* no sense to make reservations for partitions */
322 	if (bdev_is_partition(bdev))
323 		return false;
324 
325 	if (capable(CAP_SYS_ADMIN))
326 		return true;
327 	/*
328 	 * Only allow unprivileged reservations if the file descriptor is open
329 	 * for writing.
330 	 */
331 	return mode & BLK_OPEN_WRITE;
332 }
333 
/*
 * IOC_PR_REGISTER: register (or replace) a reservation key via the driver's
 * pr_ops.  Only the PR_FL_IGNORE_KEY flag is understood.
 */
static int blkdev_pr_register(struct block_device *bdev, blk_mode_t mode,
		struct pr_registration __user *arg)
{
	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
	struct pr_registration reg;

	if (!blkdev_pr_allowed(bdev, mode))
		return -EPERM;
	if (!ops || !ops->pr_register)
		return -EOPNOTSUPP;
	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;

	if (reg.flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;
	return ops->pr_register(bdev, reg.old_key, reg.new_key, reg.flags);
}
351 
/*
 * IOC_PR_RESERVE: acquire a persistent reservation via the driver's pr_ops.
 * Only the PR_FL_IGNORE_KEY flag is understood.
 */
static int blkdev_pr_reserve(struct block_device *bdev, blk_mode_t mode,
		struct pr_reservation __user *arg)
{
	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
	struct pr_reservation rsv;

	if (!blkdev_pr_allowed(bdev, mode))
		return -EPERM;
	if (!ops || !ops->pr_reserve)
		return -EOPNOTSUPP;
	if (copy_from_user(&rsv, arg, sizeof(rsv)))
		return -EFAULT;

	if (rsv.flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;
	return ops->pr_reserve(bdev, rsv.key, rsv.type, rsv.flags);
}
369 
/*
 * IOC_PR_RELEASE: release a persistent reservation.  No flags are accepted
 * for this operation.
 */
static int blkdev_pr_release(struct block_device *bdev, blk_mode_t mode,
		struct pr_reservation __user *arg)
{
	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
	struct pr_reservation rsv;

	if (!blkdev_pr_allowed(bdev, mode))
		return -EPERM;
	if (!ops || !ops->pr_release)
		return -EOPNOTSUPP;
	if (copy_from_user(&rsv, arg, sizeof(rsv)))
		return -EFAULT;

	if (rsv.flags)
		return -EOPNOTSUPP;
	return ops->pr_release(bdev, rsv.key, rsv.type);
}
387 
/*
 * IOC_PR_PREEMPT / IOC_PR_PREEMPT_ABORT: preempt another key's reservation;
 * @abort selects the abort variant.  No flags are accepted.
 */
static int blkdev_pr_preempt(struct block_device *bdev, blk_mode_t mode,
		struct pr_preempt __user *arg, bool abort)
{
	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
	struct pr_preempt p;

	if (!blkdev_pr_allowed(bdev, mode))
		return -EPERM;
	if (!ops || !ops->pr_preempt)
		return -EOPNOTSUPP;
	if (copy_from_user(&p, arg, sizeof(p)))
		return -EFAULT;

	if (p.flags)
		return -EOPNOTSUPP;
	return ops->pr_preempt(bdev, p.old_key, p.new_key, p.type, abort);
}
405 
/*
 * IOC_PR_CLEAR: clear all reservations and registrations.  No flags are
 * accepted.
 */
static int blkdev_pr_clear(struct block_device *bdev, blk_mode_t mode,
		struct pr_clear __user *arg)
{
	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
	struct pr_clear c;

	if (!blkdev_pr_allowed(bdev, mode))
		return -EPERM;
	if (!ops || !ops->pr_clear)
		return -EOPNOTSUPP;
	if (copy_from_user(&c, arg, sizeof(c)))
		return -EFAULT;

	if (c.flags)
		return -EOPNOTSUPP;
	return ops->pr_clear(bdev, c.key);
}
423 
blkdev_flushbuf(struct block_device * bdev,unsigned cmd,unsigned long arg)424 static int blkdev_flushbuf(struct block_device *bdev, unsigned cmd,
425 		unsigned long arg)
426 {
427 	if (!capable(CAP_SYS_ADMIN))
428 		return -EACCES;
429 
430 	mutex_lock(&bdev->bd_holder_lock);
431 	if (bdev->bd_holder_ops && bdev->bd_holder_ops->sync)
432 		bdev->bd_holder_ops->sync(bdev);
433 	else {
434 		mutex_unlock(&bdev->bd_holder_lock);
435 		sync_blockdev(bdev);
436 	}
437 
438 	invalidate_bdev(bdev);
439 	return 0;
440 }
441 
blkdev_roset(struct block_device * bdev,unsigned cmd,unsigned long arg)442 static int blkdev_roset(struct block_device *bdev, unsigned cmd,
443 		unsigned long arg)
444 {
445 	int ret, n;
446 
447 	if (!capable(CAP_SYS_ADMIN))
448 		return -EACCES;
449 
450 	if (get_user(n, (int __user *)arg))
451 		return -EFAULT;
452 	if (bdev->bd_disk->fops->set_read_only) {
453 		ret = bdev->bd_disk->fops->set_read_only(bdev, n);
454 		if (ret)
455 			return ret;
456 	}
457 	if (n)
458 		bdev_set_flag(bdev, BD_READ_ONLY);
459 	else
460 		bdev_clear_flag(bdev, BD_READ_ONLY);
461 	return 0;
462 }
463 
/*
 * HDIO_GETGEO: fill in legacy CHS geometry from the driver's ->getgeo and
 * copy it to user space.
 */
static int blkdev_getgeo(struct block_device *bdev,
		struct hd_geometry __user *argp)
{
	struct gendisk *disk = bdev->bd_disk;
	struct hd_geometry geo;
	int ret;

	if (!argp)
		return -EINVAL;
	if (!disk->fops->getgeo)
		return -ENOTTY;

	/*
	 * We need to set the startsect first, the driver may
	 * want to override it.
	 */
	memset(&geo, 0, sizeof(geo));
	geo.start = get_start_sect(bdev);
	ret = disk->fops->getgeo(bdev, &geo);
	if (ret)
		return ret;
	if (copy_to_user(argp, &geo, sizeof(geo)))
		return -EFAULT;
	return 0;
}
489 
#ifdef CONFIG_COMPAT
/* 32-bit layout of struct hd_geometry (start field is u32, not ulong). */
struct compat_hd_geometry {
	unsigned char heads;
	unsigned char sectors;
	unsigned short cylinders;
	u32 start;
};

/*
 * Compat HDIO_GETGEO: the first three fields share the native layout (copied
 * as 4 raw bytes), while the start sector is narrowed through put_user().
 */
static int compat_hdio_getgeo(struct block_device *bdev,
			      struct compat_hd_geometry __user *ugeo)
{
	struct gendisk *disk = bdev->bd_disk;
	struct hd_geometry geo;
	int ret;

	if (!ugeo)
		return -EINVAL;
	if (!disk->fops->getgeo)
		return -ENOTTY;

	memset(&geo, 0, sizeof(geo));
	/*
	 * We need to set the startsect first, the driver may
	 * want to override it.
	 */
	geo.start = get_start_sect(bdev);
	ret = disk->fops->getgeo(bdev, &geo);
	if (ret)
		return ret;

	/* heads/sectors/cylinders occupy the first 4 bytes in both layouts */
	ret = copy_to_user(ugeo, &geo, 4);
	ret |= put_user(geo.start, &ugeo->start);
	if (ret)
		ret = -EFAULT;

	return ret;
}
#endif
528 
529 /* set the logical block size */
blkdev_bszset(struct file * file,blk_mode_t mode,int __user * argp)530 static int blkdev_bszset(struct file *file, blk_mode_t mode,
531 		int __user *argp)
532 {
533 	// this one might be file_inode(file)->i_rdev - a rare valid
534 	// use of file_inode() for those.
535 	dev_t dev = I_BDEV(file->f_mapping->host)->bd_dev;
536 	struct file *excl_file;
537 	int ret, n;
538 
539 	if (!capable(CAP_SYS_ADMIN))
540 		return -EACCES;
541 	if (!argp)
542 		return -EINVAL;
543 	if (get_user(n, argp))
544 		return -EFAULT;
545 
546 	if (mode & BLK_OPEN_EXCL)
547 		return set_blocksize(file, n);
548 
549 	excl_file = bdev_file_open_by_dev(dev, mode, &dev, NULL);
550 	if (IS_ERR(excl_file))
551 		return -EBUSY;
552 	ret = set_blocksize(excl_file, n);
553 	fput(excl_file);
554 	return ret;
555 }
556 
557 /*
558  * Common commands that are handled the same way on native and compat
559  * user space. Note the separate arg/argp parameters that are needed
560  * to deal with the compat_ptr() conversion.
561  */
blkdev_common_ioctl(struct block_device * bdev,blk_mode_t mode,unsigned int cmd,unsigned long arg,void __user * argp)562 static int blkdev_common_ioctl(struct block_device *bdev, blk_mode_t mode,
563 			       unsigned int cmd, unsigned long arg,
564 			       void __user *argp)
565 {
566 	unsigned int max_sectors;
567 
568 	switch (cmd) {
569 	case BLKFLSBUF:
570 		return blkdev_flushbuf(bdev, cmd, arg);
571 	case BLKROSET:
572 		return blkdev_roset(bdev, cmd, arg);
573 	case BLKDISCARD:
574 		return blk_ioctl_discard(bdev, mode, arg);
575 	case BLKSECDISCARD:
576 		return blk_ioctl_secure_erase(bdev, mode, argp);
577 	case BLKZEROOUT:
578 		return blk_ioctl_zeroout(bdev, mode, arg);
579 	case BLKGETDISKSEQ:
580 		return put_u64(argp, bdev->bd_disk->diskseq);
581 	case BLKREPORTZONE:
582 		return blkdev_report_zones_ioctl(bdev, cmd, arg);
583 	case BLKRESETZONE:
584 	case BLKOPENZONE:
585 	case BLKCLOSEZONE:
586 	case BLKFINISHZONE:
587 		return blkdev_zone_mgmt_ioctl(bdev, mode, cmd, arg);
588 	case BLKGETZONESZ:
589 		return put_uint(argp, bdev_zone_sectors(bdev));
590 	case BLKGETNRZONES:
591 		return put_uint(argp, bdev_nr_zones(bdev));
592 	case BLKROGET:
593 		return put_int(argp, bdev_read_only(bdev) != 0);
594 	case BLKSSZGET: /* get block device logical block size */
595 		return put_int(argp, bdev_logical_block_size(bdev));
596 	case BLKPBSZGET: /* get block device physical block size */
597 		return put_uint(argp, bdev_physical_block_size(bdev));
598 	case BLKIOMIN:
599 		return put_uint(argp, bdev_io_min(bdev));
600 	case BLKIOOPT:
601 		return put_uint(argp, bdev_io_opt(bdev));
602 	case BLKALIGNOFF:
603 		return put_int(argp, bdev_alignment_offset(bdev));
604 	case BLKDISCARDZEROES:
605 		return put_uint(argp, 0);
606 	case BLKSECTGET:
607 		max_sectors = min_t(unsigned int, USHRT_MAX,
608 				    queue_max_sectors(bdev_get_queue(bdev)));
609 		return put_ushort(argp, max_sectors);
610 	case BLKROTATIONAL:
611 		return put_ushort(argp, !bdev_nonrot(bdev));
612 	case BLKRASET:
613 	case BLKFRASET:
614 		if(!capable(CAP_SYS_ADMIN))
615 			return -EACCES;
616 		bdev->bd_disk->bdi->ra_pages = (arg * 512) / PAGE_SIZE;
617 		return 0;
618 	case BLKRRPART:
619 		if (!capable(CAP_SYS_ADMIN))
620 			return -EACCES;
621 		if (bdev_is_partition(bdev))
622 			return -EINVAL;
623 		return disk_scan_partitions(bdev->bd_disk,
624 				mode | BLK_OPEN_STRICT_SCAN);
625 	case BLKTRACESTART:
626 	case BLKTRACESTOP:
627 	case BLKTRACETEARDOWN:
628 		return blk_trace_ioctl(bdev, cmd, argp);
629 	case IOC_PR_REGISTER:
630 		return blkdev_pr_register(bdev, mode, argp);
631 	case IOC_PR_RESERVE:
632 		return blkdev_pr_reserve(bdev, mode, argp);
633 	case IOC_PR_RELEASE:
634 		return blkdev_pr_release(bdev, mode, argp);
635 	case IOC_PR_PREEMPT:
636 		return blkdev_pr_preempt(bdev, mode, argp, false);
637 	case IOC_PR_PREEMPT_ABORT:
638 		return blkdev_pr_preempt(bdev, mode, argp, true);
639 	case IOC_PR_CLEAR:
640 		return blkdev_pr_clear(bdev, mode, argp);
641 	default:
642 		return -ENOIOCTLCMD;
643 	}
644 }
645 
646 /*
647  * Always keep this in sync with compat_blkdev_ioctl()
648  * to handle all incompatible commands in both functions.
649  *
650  * New commands must be compatible and go into blkdev_common_ioctl
651  */
blkdev_ioctl(struct file * file,unsigned cmd,unsigned long arg)652 long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
653 {
654 	struct block_device *bdev = I_BDEV(file->f_mapping->host);
655 	void __user *argp = (void __user *)arg;
656 	blk_mode_t mode = file_to_blk_mode(file);
657 	int ret;
658 
659 	switch (cmd) {
660 	/* These need separate implementations for the data structure */
661 	case HDIO_GETGEO:
662 		return blkdev_getgeo(bdev, argp);
663 	case BLKPG:
664 		return blkpg_ioctl(bdev, argp);
665 
666 	/* Compat mode returns 32-bit data instead of 'long' */
667 	case BLKRAGET:
668 	case BLKFRAGET:
669 		if (!argp)
670 			return -EINVAL;
671 		return put_long(argp,
672 			(bdev->bd_disk->bdi->ra_pages * PAGE_SIZE) / 512);
673 	case BLKGETSIZE:
674 		if (bdev_nr_sectors(bdev) > ~0UL)
675 			return -EFBIG;
676 		return put_ulong(argp, bdev_nr_sectors(bdev));
677 
678 	/* The data is compatible, but the command number is different */
679 	case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */
680 		return put_int(argp, block_size(bdev));
681 	case BLKBSZSET:
682 		return blkdev_bszset(file, mode, argp);
683 	case BLKGETSIZE64:
684 		return put_u64(argp, bdev_nr_bytes(bdev));
685 
686 	/* Incompatible alignment on i386 */
687 	case BLKTRACESETUP:
688 		return blk_trace_ioctl(bdev, cmd, argp);
689 	default:
690 		break;
691 	}
692 
693 	ret = blkdev_common_ioctl(bdev, mode, cmd, arg, argp);
694 	if (ret != -ENOIOCTLCMD)
695 		return ret;
696 
697 	if (!bdev->bd_disk->fops->ioctl)
698 		return -ENOTTY;
699 	return bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
700 }
701 
#ifdef CONFIG_COMPAT

#define BLKBSZGET_32		_IOR(0x12, 112, int)
#define BLKBSZSET_32		_IOW(0x12, 113, int)
#define BLKGETSIZE64_32		_IOR(0x12, 114, int)

/* Most of the generic ioctls are handled in the normal fallback path.
   This assumes the blkdev's low level compat_ioctl always returns
   ENOIOCTLCMD for unknown ioctls. */
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	int ret;
	void __user *argp = compat_ptr(arg);
	struct block_device *bdev = I_BDEV(file->f_mapping->host);
	struct gendisk *disk = bdev->bd_disk;
	blk_mode_t mode = file_to_blk_mode(file);

	switch (cmd) {
	/* These need separate implementations for the data structure */
	case HDIO_GETGEO:
		return compat_hdio_getgeo(bdev, argp);
	case BLKPG:
		return compat_blkpg_ioctl(bdev, argp);

	/* Compat mode returns 32-bit data instead of 'long' */
	case BLKRAGET:
	case BLKFRAGET:
		if (!argp)
			return -EINVAL;
		return compat_put_long(argp,
			(bdev->bd_disk->bdi->ra_pages * PAGE_SIZE) / 512);
	case BLKGETSIZE:
		if (bdev_nr_sectors(bdev) > ~(compat_ulong_t)0)
			return -EFBIG;
		return compat_put_ulong(argp, bdev_nr_sectors(bdev));

	/* The data is compatible, but the command number is different */
	case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */
		return put_int(argp, bdev_logical_block_size(bdev));
	case BLKBSZSET_32:
		return blkdev_bszset(file, mode, argp);
	case BLKGETSIZE64_32:
		return put_u64(argp, bdev_nr_bytes(bdev));

	/* Incompatible alignment on i386 */
	case BLKTRACESETUP32:
		return blk_trace_ioctl(bdev, cmd, argp);
	default:
		break;
	}

	ret = blkdev_common_ioctl(bdev, mode, cmd, arg, argp);
	if (ret == -ENOIOCTLCMD && disk->fops->compat_ioctl)
		ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg);

	return ret;
}
#endif
760 
761 struct blk_iou_cmd {
762 	int res;
763 	bool nowait;
764 };
765 
blk_cmd_complete(struct io_uring_cmd * cmd,unsigned int issue_flags)766 static void blk_cmd_complete(struct io_uring_cmd *cmd, unsigned int issue_flags)
767 {
768 	struct blk_iou_cmd *bic = io_uring_cmd_to_pdu(cmd, struct blk_iou_cmd);
769 
770 	if (bic->res == -EAGAIN && bic->nowait)
771 		io_uring_cmd_issue_blocking(cmd);
772 	else
773 		io_uring_cmd_done(cmd, bic->res, 0, issue_flags);
774 }
775 
bio_cmd_bio_end_io(struct bio * bio)776 static void bio_cmd_bio_end_io(struct bio *bio)
777 {
778 	struct io_uring_cmd *cmd = bio->bi_private;
779 	struct blk_iou_cmd *bic = io_uring_cmd_to_pdu(cmd, struct blk_iou_cmd);
780 
781 	if (unlikely(bio->bi_status) && !bic->res)
782 		bic->res = blk_status_to_errno(bio->bi_status);
783 
784 	io_uring_cmd_do_in_task_lazy(cmd, blk_cmd_complete);
785 	bio_put(bio);
786 }
787 
/*
 * BLOCK_URING_CMD_DISCARD: async discard of [start, start + len) driven by
 * io_uring.  Validation mirrors blk_ioctl_discard(); completion is delivered
 * through bio_cmd_bio_end_io() / blk_cmd_complete().  Returns -EIOCBQUEUED
 * on successful submission.
 */
static int blkdev_cmd_discard(struct io_uring_cmd *cmd,
			      struct block_device *bdev,
			      uint64_t start, uint64_t len, bool nowait)
{
	struct blk_iou_cmd *bic = io_uring_cmd_to_pdu(cmd, struct blk_iou_cmd);
	gfp_t gfp = nowait ? GFP_NOWAIT : GFP_KERNEL;
	sector_t sector = start >> SECTOR_SHIFT;
	sector_t nr_sects = len >> SECTOR_SHIFT;
	struct bio *prev = NULL, *bio;
	int err;

	if (!bdev_max_discard_sectors(bdev))
		return -EOPNOTSUPP;
	if (!(file_to_blk_mode(cmd->file) & BLK_OPEN_WRITE))
		return -EBADF;
	if (bdev_read_only(bdev))
		return -EPERM;
	err = blk_validate_byte_range(bdev, start, len);
	if (err)
		return err;

	err = filemap_invalidate_pages(bdev->bd_mapping, start,
					start + len - 1, nowait);
	if (err)
		return err;

	while (true) {
		bio = blk_alloc_discard_bio(bdev, &sector, &nr_sects, gfp);
		if (!bio)
			break;
		if (nowait) {
			/*
			 * Don't allow multi-bio non-blocking submissions as
			 * subsequent bios may fail but we won't get a direct
			 * indication of that. Normally, the caller should
			 * retry from a blocking context.
			 */
			if (unlikely(nr_sects)) {
				bio_put(bio);
				return -EAGAIN;
			}
			bio->bi_opf |= REQ_NOWAIT;
		}

		prev = bio_chain_and_submit(prev, bio);
	}
	if (unlikely(!prev))
		return -EAGAIN;
	if (unlikely(nr_sects))
		bic->res = -EAGAIN;

	prev->bi_private = cmd;
	prev->bi_end_io = bio_cmd_bio_end_io;
	submit_bio(prev);
	return -EIOCBQUEUED;
}
844 
blkdev_uring_cmd(struct io_uring_cmd * cmd,unsigned int issue_flags)845 int blkdev_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
846 {
847 	struct block_device *bdev = I_BDEV(cmd->file->f_mapping->host);
848 	struct blk_iou_cmd *bic = io_uring_cmd_to_pdu(cmd, struct blk_iou_cmd);
849 	const struct io_uring_sqe *sqe = cmd->sqe;
850 	u32 cmd_op = cmd->cmd_op;
851 	uint64_t start, len;
852 
853 	if (unlikely(sqe->ioprio || sqe->__pad1 || sqe->len ||
854 		     sqe->rw_flags || sqe->file_index))
855 		return -EINVAL;
856 
857 	bic->res = 0;
858 	bic->nowait = issue_flags & IO_URING_F_NONBLOCK;
859 
860 	start = READ_ONCE(sqe->addr);
861 	len = READ_ONCE(sqe->addr3);
862 
863 	switch (cmd_op) {
864 	case BLOCK_URING_CMD_DISCARD:
865 		return blkdev_cmd_discard(cmd, bdev, start, len, bic->nowait);
866 	}
867 	return -EINVAL;
868 }
869