// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014 Ezequiel Garcia
 * Copyright (c) 2011 Free Electrons
 *
 * Driver parameter handling strongly based on drivers/mtd/ubi/build.c
 * Copyright (c) International Business Machines Corp., 2006
 * Copyright (c) Nokia Corporation, 2007
 * Authors: Artem Bityutskiy, Frank Haverkamp
 */

/*
 * Read-only block devices on top of UBI volumes
 *
 * A simple implementation to allow a block device to be layered on top of a
 * UBI volume. The implementation is provided by creating a static 1-to-1
 * mapping between the block device and the UBI volume.
 *
 * The addressed byte is obtained from the addressed block sector, which is
 * mapped linearly into the corresponding LEB:
 *
 *   LEB number = addressed byte / LEB size
 *
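 * For example, with a 128 KiB LEB, the 512-byte sector 600 addresses
 * byte 307200, which maps to LEB 2 at offset 45056.
 *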
 * This feature is compiled in the UBI core, and adds a 'block' parameter
 * to allow early creation of block devices on top of UBI volumes. Runtime
 * block creation/removal for UBI volumes is provided through two UBI ioctls:
 * UBI_IOCVOLCRBLK and UBI_IOCVOLRMBLK.
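 * Userspace typically drives these ioctls through the 'ubiblock' tool
 * shipped with mtd-utils (e.g. "ubiblock --create /dev/ubi0_0").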
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/mtd/ubi.h>
#include <linux/workqueue.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/hdreg.h>
#include <linux/scatterlist.h>
#include <linux/idr.h>
#include <asm/div64.h>

#include "ubi-media.h"
#include "ubi.h"

/* Maximum number of supported devices */
#define UBIBLOCK_MAX_DEVICES 32

/* Maximum length of the 'block=' parameter */
#define UBIBLOCK_PARAM_LEN 63

/* Maximum number of comma-separated items in the 'block=' parameter */
#define UBIBLOCK_PARAM_COUNT 2

struct ubiblock_param {
	int ubi_num;
	int vol_id;
	char name[UBIBLOCK_PARAM_LEN+1];
};

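/*
 * Per-request driver data, embedded in every blk-mq request through
 * tag_set.cmd_size below: the work item that performs the actual read,
 * plus the UBI scatter-gather list describing the request's pages.
 */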
struct ubiblock_pdu {
	struct work_struct work;
	struct ubi_sgl usgl;
};

/* Number of elements set in the @ubiblock_param array */
static int ubiblock_devs __initdata;

/* UBI block device specification parameters */
static struct ubiblock_param ubiblock_param[UBIBLOCK_MAX_DEVICES] __initdata;

struct ubiblock {
	struct ubi_volume_desc *desc;
	int ubi_num;
	int vol_id;
	int refcnt;
	int leb_size;

	struct gendisk *gd;
	struct request_queue *rq;

	struct workqueue_struct *wq;

	struct mutex dev_mutex;
	struct list_head list;
	struct blk_mq_tag_set tag_set;
};

/* Linked list of all ubiblock instances */
static LIST_HEAD(ubiblock_devices);
static DEFINE_IDR(ubiblock_minor_idr);
/* Protects ubiblock_devices and ubiblock_minor_idr */
static DEFINE_MUTEX(devices_mutex);
static int ubiblock_major;
static int __init ubiblock_set_param(const char *val,
				     const struct kernel_param *kp)
{
	int i, ret;
	size_t len;
	struct ubiblock_param *param;
	char buf[UBIBLOCK_PARAM_LEN];
	char *pbuf = &buf[0];
	char *tokens[UBIBLOCK_PARAM_COUNT];

	if (!val)
		return -EINVAL;

	len = strnlen(val, UBIBLOCK_PARAM_LEN);
	if (len == 0) {
		pr_warn("UBI: block: empty 'block=' parameter - ignored\n");
		return 0;
	}

	if (len == UBIBLOCK_PARAM_LEN) {
		pr_err("UBI: block: parameter \"%s\" is too long, max. is %d\n",
		       val, UBIBLOCK_PARAM_LEN);
		return -EINVAL;
	}

	strcpy(buf, val);

	/* Get rid of the final newline */
	if (buf[len - 1] == '\n')
		buf[len - 1] = '\0';

	for (i = 0; i < UBIBLOCK_PARAM_COUNT; i++)
		tokens[i] = strsep(&pbuf, ",");

	/* Don't overflow the fixed-size parameter array */
	if (ubiblock_devs == UBIBLOCK_MAX_DEVICES) {
		pr_err("UBI: block: too many 'block=' parameters, max. is %d\n",
		       UBIBLOCK_MAX_DEVICES);
		return -EINVAL;
	}

	param = &ubiblock_param[ubiblock_devs];
	if (tokens[1]) {
		/* Two parameters: can be 'ubi, vol_id' or 'ubi, vol_name' */
		ret = kstrtoint(tokens[0], 10, &param->ubi_num);
		if (ret < 0)
			return -EINVAL;

		/* Second param can be a number or a name */
		ret = kstrtoint(tokens[1], 10, &param->vol_id);
		if (ret < 0) {
			param->vol_id = -1;
			strcpy(param->name, tokens[1]);
		}
	} else {
		/* One parameter: must be the volume device path */
		strcpy(param->name, tokens[0]);
		param->ubi_num = -1;
		param->vol_id = -1;
	}

	ubiblock_devs++;

	return 0;
}

static const struct kernel_param_ops ubiblock_param_ops = {
	.set = ubiblock_set_param,
};
module_param_cb(block, &ubiblock_param_ops, NULL, 0);
MODULE_PARM_DESC(block, "Attach block devices to UBI volumes. Parameter format: block=<path|dev,num|dev,name>.\n"
			"Multiple \"block\" parameters may be specified.\n"
			"UBI volumes may be specified by their number, name, or path to the device node.\n"
			"Examples\n"
			"Using the UBI volume path:\n"
			"ubi.block=/dev/ubi0_0\n"
			"Using the UBI device, and the volume name:\n"
			"ubi.block=0,rootfs\n"
			"Using both UBI device number and UBI volume number:\n"
			"ubi.block=0,0\n");

static struct ubiblock *find_dev_nolock(int ubi_num, int vol_id)
{
	struct ubiblock *dev;

	list_for_each_entry(dev, &ubiblock_devices, list)
		if (dev->ubi_num == ubi_num && dev->vol_id == vol_id)
			return dev;
	return NULL;
}

static int ubiblock_read(struct ubiblock_pdu *pdu)
{
	int ret, leb, offset, bytes_left, to_read;
	u64 pos;
	struct request *req = blk_mq_rq_from_pdu(pdu);
	struct ubiblock *dev = req->q->queuedata;

	to_read = blk_rq_bytes(req);
	pos = blk_rq_pos(req) << 9;

	/* Get LEB:offset address to read from */
	offset = do_div(pos, dev->leb_size);
	leb = pos;
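	/*
	 * For example, with a 126976-byte LEB (typical for 128 KiB PEBs
	 * on 2 KiB-page NAND), a request starting at byte 300000 yields
	 * leb = 2, offset = 46048.
	 */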
	bytes_left = to_read;

	while (bytes_left) {
		/*
		 * We can only read one LEB at a time. Therefore if the read
		 * length is larger than one LEB size, we split the operation.
		 */
		if (offset + to_read > dev->leb_size)
			to_read = dev->leb_size - offset;

		ret = ubi_read_sg(dev->desc, leb, &pdu->usgl, offset, to_read);
		if (ret < 0)
			return ret;

		bytes_left -= to_read;
		to_read = bytes_left;
		leb += 1;
		offset = 0;
	}
	return 0;
}

static int ubiblock_open(struct block_device *bdev, fmode_t mode)
{
	struct ubiblock *dev = bdev->bd_disk->private_data;
	int ret;

	mutex_lock(&dev->dev_mutex);
	if (dev->refcnt > 0) {
		/*
		 * The volume is already open, just increase the reference
		 * counter.
		 */
		goto out_done;
	}

	/*
	 * We want users to be aware they should only mount us as read-only.
	 * It's just a paranoid check, as write requests will get rejected
	 * in any case.
	 */
	if (mode & FMODE_WRITE) {
		ret = -EROFS;
		goto out_unlock;
	}

	dev->desc = ubi_open_volume(dev->ubi_num, dev->vol_id, UBI_READONLY);
	if (IS_ERR(dev->desc)) {
		dev_err(disk_to_dev(dev->gd), "failed to open ubi volume %d_%d",
			dev->ubi_num, dev->vol_id);
		ret = PTR_ERR(dev->desc);
		dev->desc = NULL;
		goto out_unlock;
	}

out_done:
	dev->refcnt++;
	mutex_unlock(&dev->dev_mutex);
	return 0;

out_unlock:
	mutex_unlock(&dev->dev_mutex);
	return ret;
}

static void ubiblock_release(struct gendisk *gd, fmode_t mode)
{
	struct ubiblock *dev = gd->private_data;

	mutex_lock(&dev->dev_mutex);
	dev->refcnt--;
	if (dev->refcnt == 0) {
		ubi_close_volume(dev->desc);
		dev->desc = NULL;
	}
	mutex_unlock(&dev->dev_mutex);
}

static int ubiblock_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	/* Some tools might require this information */
	geo->heads = 1;
	geo->cylinders = 1;
	geo->sectors = get_capacity(bdev->bd_disk);
	geo->start = 0;
	return 0;
}

static const struct block_device_operations ubiblock_ops = {
	.owner = THIS_MODULE,
	.open = ubiblock_open,
	.release = ubiblock_release,
	.getgeo = ubiblock_getgeo,
};


static void ubiblock_do_work(struct work_struct *work)
{
	int ret;
	struct ubiblock_pdu *pdu = container_of(work, struct ubiblock_pdu, work);
	struct request *req = blk_mq_rq_from_pdu(pdu);

	blk_mq_start_request(req);

	/*
	 * It is safe to ignore the return value of blk_rq_map_sg() because
	 * the number of sg entries is limited to UBI_MAX_SG_COUNT
	 * and ubi_read_sg() will check that limit.
	 */
	blk_rq_map_sg(req->q, req, pdu->usgl.sg);

	ret = ubiblock_read(pdu);
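	/*
	 * The data was filled in by the CPU outside the regular page-cache
	 * I/O path; flush the D-cache so architectures with aliasing
	 * caches see the freshly read bytes.
	 */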
	rq_flush_dcache_pages(req);

	blk_mq_end_request(req, errno_to_blk_status(ret));
}

static blk_status_t ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
				      const struct blk_mq_queue_data *bd)
{
	struct request *req = bd->rq;
	struct ubiblock *dev = hctx->queue->queuedata;
	struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);

	switch (req_op(req)) {
	case REQ_OP_READ:
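		/*
		 * ubi_read_sg() may sleep for a long time, so serve the
		 * read from the per-device workqueue instead of doing it
		 * directly in the ->queue_rq() path.
		 */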
		ubi_sgl_init(&pdu->usgl);
		queue_work(dev->wq, &pdu->work);
		return BLK_STS_OK;
	default:
		return BLK_STS_IOERR;
	}
}

static int ubiblock_init_request(struct blk_mq_tag_set *set,
				 struct request *req, unsigned int hctx_idx,
				 unsigned int numa_node)
{
	struct ubiblock_pdu *pdu = blk_mq_rq_to_pdu(req);

	sg_init_table(pdu->usgl.sg, UBI_MAX_SG_COUNT);
	INIT_WORK(&pdu->work, ubiblock_do_work);

	return 0;
}

static const struct blk_mq_ops ubiblock_mq_ops = {
	.queue_rq = ubiblock_queue_rq,
	.init_request = ubiblock_init_request,
};


static int calc_disk_capacity(struct ubi_volume_info *vi, u64 *disk_capacity)
{
	u64 size = vi->used_bytes >> 9;

	if (vi->used_bytes % 512) {
		pr_warn("UBI: block: volume size is not a multiple of 512, last %llu bytes are ignored!\n",
			vi->used_bytes - (size << 9));
	}

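	/*
	 * sector_t may be only 32 bits wide on some configurations;
	 * reject volumes whose sector count would be truncated.
	 */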
	if ((sector_t)size != size)
		return -EFBIG;

	*disk_capacity = size;

	return 0;
}

int ubiblock_create(struct ubi_volume_info *vi)
{
	struct ubiblock *dev;
	struct gendisk *gd;
	u64 disk_capacity;
	int ret;

	ret = calc_disk_capacity(vi, &disk_capacity);
	if (ret)
		return ret;

	/* Check that the volume isn't already handled */
	mutex_lock(&devices_mutex);
	if (find_dev_nolock(vi->ubi_num, vi->vol_id)) {
		ret = -EEXIST;
		goto out_unlock;
	}

	dev = kzalloc(sizeof(struct ubiblock), GFP_KERNEL);
	if (!dev) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	mutex_init(&dev->dev_mutex);

	dev->ubi_num = vi->ubi_num;
	dev->vol_id = vi->vol_id;
	dev->leb_size = vi->usable_leb_size;

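	/*
	 * A single hw queue is sufficient: requests are handed off to a
	 * workqueue anyway. cmd_size makes blk-mq co-allocate a struct
	 * ubiblock_pdu with every request.
	 */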
	dev->tag_set.ops = &ubiblock_mq_ops;
	dev->tag_set.queue_depth = 64;
	dev->tag_set.numa_node = NUMA_NO_NODE;
	dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	dev->tag_set.cmd_size = sizeof(struct ubiblock_pdu);
	dev->tag_set.driver_data = dev;
	dev->tag_set.nr_hw_queues = 1;

	ret = blk_mq_alloc_tag_set(&dev->tag_set);
	if (ret) {
		/* dev->gd is not set up yet, so don't use disk_to_dev() here */
		pr_err("UBI: block: blk_mq_alloc_tag_set failed\n");
		goto out_free_dev;
	}

	/* Initialize the gendisk of this ubiblock device */
	gd = blk_mq_alloc_disk(&dev->tag_set, dev);
	if (IS_ERR(gd)) {
		ret = PTR_ERR(gd);
		goto out_free_tags;
	}

	gd->fops = &ubiblock_ops;
	gd->major = ubiblock_major;
	gd->minors = 1;
	gd->first_minor = idr_alloc(&ubiblock_minor_idr, dev, 0, 0, GFP_KERNEL);
	if (gd->first_minor < 0) {
		dev_err(disk_to_dev(gd),
			"block: dynamic minor allocation failed");
		ret = -ENODEV;
		goto out_cleanup_disk;
	}
	gd->private_data = dev;
	sprintf(gd->disk_name, "ubiblock%d_%d", dev->ubi_num, dev->vol_id);
	set_capacity(gd, disk_capacity);
	dev->gd = gd;

	dev->rq = gd->queue;
	blk_queue_max_segments(dev->rq, UBI_MAX_SG_COUNT);

	/*
	 * Create one workqueue per volume (per registered block device).
	 * Remember, workqueues are cheap, they're not threads.
	 */
	dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name);
	if (!dev->wq) {
		ret = -ENOMEM;
		goto out_remove_minor;
	}

	list_add_tail(&dev->list, &ubiblock_devices);

	/* Must be the last step: anyone can call file ops from now on */
	add_disk(dev->gd);
	dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
		 dev->ubi_num, dev->vol_id, vi->name);
	mutex_unlock(&devices_mutex);
	return 0;

out_remove_minor:
	idr_remove(&ubiblock_minor_idr, gd->first_minor);
out_cleanup_disk:
	/* Use 'gd' here: dev->gd is still unset if the minor allocation failed */
	blk_cleanup_disk(gd);
out_free_tags:
	blk_mq_free_tag_set(&dev->tag_set);
out_free_dev:
	kfree(dev);
out_unlock:
	mutex_unlock(&devices_mutex);

	return ret;
}

static void ubiblock_cleanup(struct ubiblock *dev)
{
	/* Stop new requests from arriving */
	del_gendisk(dev->gd);
	/* Flush pending work */
	destroy_workqueue(dev->wq);
	/* Finally destroy the blk queue */
	dev_info(disk_to_dev(dev->gd), "released");
	blk_cleanup_disk(dev->gd);
	blk_mq_free_tag_set(&dev->tag_set);
	idr_remove(&ubiblock_minor_idr, dev->gd->first_minor);
}

int ubiblock_remove(struct ubi_volume_info *vi)
{
	struct ubiblock *dev;
	int ret;

	mutex_lock(&devices_mutex);
	dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
	if (!dev) {
		ret = -ENODEV;
		goto out_unlock;
	}

	/* Found a device, let's lock it so we can check if it's busy */
	mutex_lock(&dev->dev_mutex);
	if (dev->refcnt > 0) {
		ret = -EBUSY;
		goto out_unlock_dev;
	}

	/* Remove from device list */
	list_del(&dev->list);
	ubiblock_cleanup(dev);
	mutex_unlock(&dev->dev_mutex);
	mutex_unlock(&devices_mutex);

	kfree(dev);
	return 0;

out_unlock_dev:
	mutex_unlock(&dev->dev_mutex);
out_unlock:
	mutex_unlock(&devices_mutex);
	return ret;
}

static int ubiblock_resize(struct ubi_volume_info *vi)
{
	struct ubiblock *dev;
	u64 disk_capacity;
	int ret;

	/*
	 * Need to lock the device list until we stop using the device,
	 * otherwise the device struct might get released in
	 * 'ubiblock_remove()'.
	 */
	mutex_lock(&devices_mutex);
	dev = find_dev_nolock(vi->ubi_num, vi->vol_id);
	if (!dev) {
		mutex_unlock(&devices_mutex);
		return -ENODEV;
	}

	ret = calc_disk_capacity(vi, &disk_capacity);
	if (ret) {
		mutex_unlock(&devices_mutex);
		if (ret == -EFBIG) {
			dev_warn(disk_to_dev(dev->gd),
				 "the volume is too big (%d LEBs), cannot resize",
				 vi->size);
		}
		return ret;
	}

	mutex_lock(&dev->dev_mutex);

	if (get_capacity(dev->gd) != disk_capacity) {
		set_capacity(dev->gd, disk_capacity);
		dev_info(disk_to_dev(dev->gd), "resized to %lld bytes",
			 vi->used_bytes);
	}
	mutex_unlock(&dev->dev_mutex);
	mutex_unlock(&devices_mutex);
	return 0;
}

static int ubiblock_notify(struct notifier_block *nb,
			   unsigned long notification_type, void *ns_ptr)
{
	struct ubi_notification *nt = ns_ptr;

	switch (notification_type) {
	case UBI_VOLUME_ADDED:
		/*
		 * We want to enforce explicit block device creation for
		 * volumes, so when a volume is added we do nothing.
		 */
		break;
	case UBI_VOLUME_REMOVED:
		ubiblock_remove(&nt->vi);
		break;
	case UBI_VOLUME_RESIZED:
		ubiblock_resize(&nt->vi);
		break;
	case UBI_VOLUME_UPDATED:
		/*
		 * If the volume is static, a content update might mean the
		 * size (i.e. used_bytes) was also changed.
		 */
		if (nt->vi.vol_type == UBI_STATIC_VOLUME)
			ubiblock_resize(&nt->vi);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block ubiblock_notifier = {
	.notifier_call = ubiblock_notify,
};

static struct ubi_volume_desc * __init
open_volume_desc(const char *name, int ubi_num, int vol_id)
{
	if (ubi_num == -1)
		/* No ubi num, name must be a vol device path */
		return ubi_open_volume_path(name, UBI_READONLY);
	else if (vol_id == -1)
		/* No vol_id, must be vol_name */
		return ubi_open_volume_nm(ubi_num, name, UBI_READONLY);
	else
		return ubi_open_volume(ubi_num, vol_id, UBI_READONLY);
}

static void __init ubiblock_create_from_param(void)
{
	int i, ret = 0;
	struct ubiblock_param *p;
	struct ubi_volume_desc *desc;
	struct ubi_volume_info vi;

	/*
	 * If there is an error creating one of the ubiblocks, continue on to
	 * create the following ubiblocks. This helps in a circumstance where
	 * the kernel command-line specifies multiple block devices and some
	 * may be broken, but we still want the working ones to come up.
	 */
	for (i = 0; i < ubiblock_devs; i++) {
		p = &ubiblock_param[i];

		desc = open_volume_desc(p->name, p->ubi_num, p->vol_id);
		if (IS_ERR(desc)) {
			pr_err("UBI: block: can't open volume on ubi%d_%d, err=%ld\n",
			       p->ubi_num, p->vol_id, PTR_ERR(desc));
			continue;
		}

		ubi_get_volume_info(desc, &vi);
		ubi_close_volume(desc);

		ret = ubiblock_create(&vi);
		if (ret) {
			pr_err("UBI: block: can't add '%s' volume on ubi%d_%d, err=%d\n",
			       vi.name, p->ubi_num, p->vol_id, ret);
			continue;
		}
	}
}

static void ubiblock_remove_all(void)
{
	struct ubiblock *next;
	struct ubiblock *dev;

	mutex_lock(&devices_mutex);
	list_for_each_entry_safe(dev, next, &ubiblock_devices, list) {
		/* The module is being forcefully removed */
		WARN_ON(dev->desc);
		/* Remove from device list */
		list_del(&dev->list);
		ubiblock_cleanup(dev);
		kfree(dev);
	}
	mutex_unlock(&devices_mutex);
}

int __init ubiblock_init(void)
{
	int ret;

	ubiblock_major = register_blkdev(0, "ubiblock");
	if (ubiblock_major < 0)
		return ubiblock_major;

	/*
	 * Attach block devices from 'block=' module param.
	 * Even if one block device in the param list fails to come up,
	 * still allow the module to load and leave any others up.
	 */
	ubiblock_create_from_param();

	/*
	 * Block devices are only created upon user requests, so we ignore
	 * existing volumes.
	 */
	ret = ubi_register_volume_notifier(&ubiblock_notifier, 1);
	if (ret)
		goto err_unreg;
	return 0;

err_unreg:
	unregister_blkdev(ubiblock_major, "ubiblock");
	ubiblock_remove_all();
	return ret;
}

void __exit ubiblock_exit(void)
{
	ubi_unregister_volume_notifier(&ubiblock_notifier);
	ubiblock_remove_all();
	unregister_blkdev(ubiblock_major, "ubiblock");
}