// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Interface to Linux block layer for MTD 'translation layers'.
 *
 * Copyright © 2003-2010 David Woodhouse <dwmw2@infradead.org>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/mtd/blktrans.h>
#include <linux/mtd/mtd.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
#include <linux/hdreg.h>
#include <linux/mutex.h>
#include <linux/uaccess.h>

#include "mtdcore.h"

static LIST_HEAD(blktrans_majors);
static DEFINE_MUTEX(blktrans_ref_mutex);

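/*
 * Tear down a translation layer device once the last reference to it has
 * been dropped: release the request queue, tag set and gendisk, unlink
 * the device from its translation layer's list and free it.
 */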
static void blktrans_dev_release(struct kref *kref)
{
        struct mtd_blktrans_dev *dev =
                container_of(kref, struct mtd_blktrans_dev, ref);

        dev->disk->private_data = NULL;
        blk_cleanup_queue(dev->rq);
        blk_mq_free_tag_set(dev->tag_set);
        kfree(dev->tag_set);
        put_disk(dev->disk);
        list_del(&dev->list);
        kfree(dev);
}

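/*
 * Look up the device behind a gendisk and take a reference on it, or
 * return NULL if the disk has already been torn down.
 */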
static struct mtd_blktrans_dev *blktrans_dev_get(struct gendisk *disk)
{
        struct mtd_blktrans_dev *dev;

        mutex_lock(&blktrans_ref_mutex);
        dev = disk->private_data;

        if (!dev)
                goto unlock;
        kref_get(&dev->ref);
unlock:
        mutex_unlock(&blktrans_ref_mutex);
        return dev;
}

static void blktrans_dev_put(struct mtd_blktrans_dev *dev)
{
        mutex_lock(&blktrans_ref_mutex);
        kref_put(&dev->ref, blktrans_dev_release);
        mutex_unlock(&blktrans_ref_mutex);
}

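/*
 * Service one request segment by translating it into the appropriate
 * readsect/writesect/discard/flush calls on the translation layer.
 */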
static blk_status_t do_blktrans_request(struct mtd_blktrans_ops *tr,
                                        struct mtd_blktrans_dev *dev,
                                        struct request *req)
{
        unsigned long block, nsect;
        char *buf;

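        /*
         * blk_rq_pos() counts 512-byte sectors; convert this segment's
         * start and length into tr->blksize-sized device blocks.
         */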
        block = blk_rq_pos(req) << 9 >> tr->blkshift;
        nsect = blk_rq_cur_bytes(req) >> tr->blkshift;

        if (req_op(req) == REQ_OP_FLUSH) {
                if (tr->flush(dev))
                        return BLK_STS_IOERR;
                return BLK_STS_OK;
        }

        if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
            get_capacity(req->rq_disk))
                return BLK_STS_IOERR;

        switch (req_op(req)) {
        case REQ_OP_DISCARD:
                if (tr->discard(dev, block, nsect))
                        return BLK_STS_IOERR;
                return BLK_STS_OK;
        case REQ_OP_READ:
                buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
                for (; nsect > 0; nsect--, block++, buf += tr->blksize) {
                        if (tr->readsect(dev, block, buf)) {
                                kunmap(bio_page(req->bio));
                                return BLK_STS_IOERR;
                        }
                }
                kunmap(bio_page(req->bio));
                rq_flush_dcache_pages(req);
                return BLK_STS_OK;
        case REQ_OP_WRITE:
                if (!tr->writesect)
                        return BLK_STS_IOERR;

                rq_flush_dcache_pages(req);
                buf = kmap(bio_page(req->bio)) + bio_offset(req->bio);
                for (; nsect > 0; nsect--, block++, buf += tr->blksize) {
                        if (tr->writesect(dev, block, buf)) {
                                kunmap(bio_page(req->bio));
                                return BLK_STS_IOERR;
                        }
                }
                kunmap(bio_page(req->bio));
                return BLK_STS_OK;
        default:
                return BLK_STS_IOERR;
        }
}

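/*
 * Exported for translation layers: their ->background() method polls this
 * to decide when background work should yield to incoming requests.
 */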
int mtd_blktrans_cease_background(struct mtd_blktrans_dev *dev)
{
        return dev->bg_stop;
}
EXPORT_SYMBOL_GPL(mtd_blktrans_cease_background);

static struct request *mtd_next_request(struct mtd_blktrans_dev *dev)
{
        struct request *rq;

        rq = list_first_entry_or_null(&dev->rq_list, struct request, queuelist);
        if (rq) {
                list_del_init(&rq->queuelist);
                blk_mq_start_request(rq);
                return rq;
        }

        return NULL;
}

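/*
 * Dispatch loop, entered with dev->queue_lock held and interrupts off.
 * Processes queued requests one segment at a time, dropping the queue
 * lock around the actual I/O, and gives the translation layer one shot
 * of background processing per idle period.
 */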
static void mtd_blktrans_work(struct mtd_blktrans_dev *dev)
        __releases(&dev->queue_lock)
        __acquires(&dev->queue_lock)
{
        struct mtd_blktrans_ops *tr = dev->tr;
        struct request *req = NULL;
        int background_done = 0;

        while (1) {
                blk_status_t res;

                dev->bg_stop = false;
                if (!req && !(req = mtd_next_request(dev))) {
                        if (tr->background && !background_done) {
                                spin_unlock_irq(&dev->queue_lock);
                                mutex_lock(&dev->lock);
                                tr->background(dev);
                                mutex_unlock(&dev->lock);
                                spin_lock_irq(&dev->queue_lock);
                                /*
                                 * Do background processing just once per
                                 * idle period.
                                 */
                                background_done = !dev->bg_stop;
                                continue;
                        }
                        break;
                }

                spin_unlock_irq(&dev->queue_lock);

                mutex_lock(&dev->lock);
                res = do_blktrans_request(dev->tr, dev, req);
                mutex_unlock(&dev->lock);

                if (!blk_update_request(req, res, blk_rq_cur_bytes(req))) {
                        __blk_mq_end_request(req, res);
                        req = NULL;
                }

                background_done = 0;
                spin_lock_irq(&dev->queue_lock);
        }
}

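/* blk-mq ->queue_rq handler: queue the request and run the dispatch loop. */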
static blk_status_t mtd_queue_rq(struct blk_mq_hw_ctx *hctx,
                                 const struct blk_mq_queue_data *bd)
{
        struct mtd_blktrans_dev *dev;

        dev = hctx->queue->queuedata;
        if (!dev) {
                blk_mq_start_request(bd->rq);
                return BLK_STS_IOERR;
        }

        spin_lock_irq(&dev->queue_lock);
        list_add_tail(&bd->rq->queuelist, &dev->rq_list);
        mtd_blktrans_work(dev);
        spin_unlock_irq(&dev->queue_lock);

        return BLK_STS_OK;
}

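/*
 * Opens nest: only the first opener calls into the translation layer's
 * ->open() and takes a reference on the underlying MTD device.
 */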
static int blktrans_open(struct block_device *bdev, fmode_t mode)
{
        struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
        int ret = 0;

        if (!dev)
                return -ERESTARTSYS; /* FIXME: busy loop! -arnd*/

        mtd_table_mutex_lock();
        mutex_lock(&dev->lock);

        if (dev->open)
                goto unlock;

        kref_get(&dev->ref);
        __module_get(dev->tr->owner);

        if (!dev->mtd)
                goto unlock;

        if (dev->tr->open) {
                ret = dev->tr->open(dev);
                if (ret)
                        goto error_put;
        }

        ret = __get_mtd_device(dev->mtd);
        if (ret)
                goto error_release;
        dev->file_mode = mode;

unlock:
        dev->open++;
        mutex_unlock(&dev->lock);
        mtd_table_mutex_unlock();
        blktrans_dev_put(dev);
        return ret;

error_release:
        if (dev->tr->release)
                dev->tr->release(dev);
error_put:
        module_put(dev->tr->owner);
        kref_put(&dev->ref, blktrans_dev_release);
        mutex_unlock(&dev->lock);
        mtd_table_mutex_unlock();
        blktrans_dev_put(dev);
        return ret;
}

static void blktrans_release(struct gendisk *disk, fmode_t mode)
{
        struct mtd_blktrans_dev *dev = blktrans_dev_get(disk);

        if (!dev)
                return;

        mtd_table_mutex_lock();
        mutex_lock(&dev->lock);

        if (--dev->open)
                goto unlock;

        kref_put(&dev->ref, blktrans_dev_release);
        module_put(dev->tr->owner);

        if (dev->mtd) {
                if (dev->tr->release)
                        dev->tr->release(dev);
                __put_mtd_device(dev->mtd);
        }
unlock:
        mutex_unlock(&dev->lock);
        mtd_table_mutex_unlock();
        blktrans_dev_put(dev);
}

static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
        int ret = -ENXIO;

        if (!dev)
                return ret;

        mutex_lock(&dev->lock);

        if (!dev->mtd)
                goto unlock;

        ret = dev->tr->getgeo ? dev->tr->getgeo(dev, geo) : -ENOTTY;
unlock:
        mutex_unlock(&dev->lock);
        blktrans_dev_put(dev);
        return ret;
}

static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
                          unsigned int cmd, unsigned long arg)
{
        struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
        int ret = -ENXIO;

        if (!dev)
                return ret;

        mutex_lock(&dev->lock);

        if (!dev->mtd)
                goto unlock;

        switch (cmd) {
        case BLKFLSBUF:
                ret = dev->tr->flush ? dev->tr->flush(dev) : 0;
                break;
        default:
                ret = -ENOTTY;
        }
unlock:
        mutex_unlock(&dev->lock);
        blktrans_dev_put(dev);
        return ret;
}

static const struct block_device_operations mtd_block_ops = {
        .owner = THIS_MODULE,
        .open = blktrans_open,
        .release = blktrans_release,
        .ioctl = blktrans_ioctl,
        .getgeo = blktrans_getgeo,
};

static const struct blk_mq_ops mtd_mq_ops = {
        .queue_rq = mtd_queue_rq,
};

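/*
 * Register a new device with its translation layer: pick (or validate) a
 * device number, set up the gendisk and blk-mq queue, and publish the
 * disk. Caller must hold the MTD table mutex.
 */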
int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
{
        struct mtd_blktrans_ops *tr = new->tr;
        struct mtd_blktrans_dev *d;
        int last_devnum = -1;
        struct gendisk *gd;
        int ret;

        mtd_table_assert_mutex_locked();

        mutex_lock(&blktrans_ref_mutex);
        list_for_each_entry(d, &tr->devs, list) {
                if (new->devnum == -1) {
                        /* Use first free number */
                        if (d->devnum != last_devnum+1) {
                                /* Found a free devnum. Plug it in here */
                                new->devnum = last_devnum+1;
                                list_add_tail(&new->list, &d->list);
                                goto added;
                        }
                } else if (d->devnum == new->devnum) {
                        /* Required number taken */
                        mutex_unlock(&blktrans_ref_mutex);
                        return -EBUSY;
                } else if (d->devnum > new->devnum) {
                        /* Required number was free */
                        list_add_tail(&new->list, &d->list);
                        goto added;
                }
                last_devnum = d->devnum;
        }

        ret = -EBUSY;
        if (new->devnum == -1)
                new->devnum = last_devnum+1;

        /* Check that the device and any partitions will get valid
         * minor numbers and that the disk naming code below can cope
         * with this number. */
        if (new->devnum > (MINORMASK >> tr->part_bits) ||
            (tr->part_bits && new->devnum >= 27 * 26)) {
                mutex_unlock(&blktrans_ref_mutex);
                goto error1;
        }

        list_add_tail(&new->list, &tr->devs);
added:
        mutex_unlock(&blktrans_ref_mutex);

        mutex_init(&new->lock);
        kref_init(&new->ref);
        if (!tr->writesect)
                new->readonly = 1;

        /* Create gendisk */
        ret = -ENOMEM;
        gd = alloc_disk(1 << tr->part_bits);

        if (!gd)
                goto error2;

        new->disk = gd;
        gd->private_data = new;
        gd->major = tr->major;
        gd->first_minor = (new->devnum) << tr->part_bits;
        gd->fops = &mtd_block_ops;

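        /*
         * Partitionable devices get sd-style letter suffixes, one or two
         * letters ("a".."zz"); everything else a plain decimal index.
         */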
        if (tr->part_bits)
                if (new->devnum < 26)
                        snprintf(gd->disk_name, sizeof(gd->disk_name),
                                 "%s%c", tr->name, 'a' + new->devnum);
                else
                        snprintf(gd->disk_name, sizeof(gd->disk_name),
                                 "%s%c%c", tr->name,
                                 'a' - 1 + new->devnum / 26,
                                 'a' + new->devnum % 26);
        else
                snprintf(gd->disk_name, sizeof(gd->disk_name),
                         "%s%d", tr->name, new->devnum);

        set_capacity(gd, ((u64)new->size * tr->blksize) >> 9);

        /* Create the request queue */
        spin_lock_init(&new->queue_lock);
        INIT_LIST_HEAD(&new->rq_list);

        new->tag_set = kzalloc(sizeof(*new->tag_set), GFP_KERNEL);
        if (!new->tag_set)
                goto error3;

        new->rq = blk_mq_init_sq_queue(new->tag_set, &mtd_mq_ops, 2,
                                BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
        if (IS_ERR(new->rq)) {
                ret = PTR_ERR(new->rq);
                new->rq = NULL;
                goto error4;
        }

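        /* Advertise a volatile write cache so flush requests reach tr->flush(). */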
        if (tr->flush)
                blk_queue_write_cache(new->rq, true, false);

        new->rq->queuedata = new;
        blk_queue_logical_block_size(new->rq, tr->blksize);

        blk_queue_flag_set(QUEUE_FLAG_NONROT, new->rq);
        blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, new->rq);

        if (tr->discard) {
                blk_queue_flag_set(QUEUE_FLAG_DISCARD, new->rq);
                blk_queue_max_discard_sectors(new->rq, UINT_MAX);
                new->rq->limits.discard_granularity = tr->blksize;
        }

        gd->queue = new->rq;

        if (new->readonly)
                set_disk_ro(gd, 1);

        device_add_disk(&new->mtd->dev, gd, NULL);

        if (new->disk_attributes) {
                ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
                                         new->disk_attributes);
                WARN_ON(ret);
        }
        return 0;
error4:
        kfree(new->tag_set);
error3:
        put_disk(new->disk);
error2:
        list_del(&new->list);
error1:
        return ret;
}

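/*
 * Unpublish a device: remove the gendisk, fail any requests still queued
 * and detach the underlying MTD device. Caller must hold the MTD table
 * mutex.
 */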
int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
{
        unsigned long flags;

        mtd_table_assert_mutex_locked();
        if (old->disk_attributes)
                sysfs_remove_group(&disk_to_dev(old->disk)->kobj,
                                   old->disk_attributes);

        /* Stop new requests from arriving */
        del_gendisk(old->disk);

        /* Kill current requests */
        spin_lock_irqsave(&old->queue_lock, flags);
        old->rq->queuedata = NULL;
        spin_unlock_irqrestore(&old->queue_lock, flags);

        /* freeze+quiesce queue to ensure all requests are flushed */
        blk_mq_freeze_queue(old->rq);
        blk_mq_quiesce_queue(old->rq);
        blk_mq_unquiesce_queue(old->rq);
        blk_mq_unfreeze_queue(old->rq);

        /* If the device is currently open, tell the trans driver to close it,
           then put the mtd device, and don't touch it again. */
        mutex_lock(&old->lock);
        if (old->open) {
                if (old->tr->release)
                        old->tr->release(old);
                __put_mtd_device(old->mtd);
        }

        old->mtd = NULL;

        mutex_unlock(&old->lock);
        blktrans_dev_put(old);
        return 0;
}

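/*
 * MTD core notifier callbacks: propagate device add/remove events to
 * every registered translation layer.
 */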
static void blktrans_notify_remove(struct mtd_info *mtd)
{
        struct mtd_blktrans_ops *tr;
        struct mtd_blktrans_dev *dev, *next;

        list_for_each_entry(tr, &blktrans_majors, list)
                list_for_each_entry_safe(dev, next, &tr->devs, list)
                        if (dev->mtd == mtd)
                                tr->remove_dev(dev);
}

static void blktrans_notify_add(struct mtd_info *mtd)
{
        struct mtd_blktrans_ops *tr;

        if (mtd->type == MTD_ABSENT)
                return;

        list_for_each_entry(tr, &blktrans_majors, list)
                tr->add_mtd(tr, mtd);
}

static struct mtd_notifier blktrans_notifier = {
        .add = blktrans_notify_add,
        .remove = blktrans_notify_remove,
};

int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
        struct mtd_info *mtd;
        int ret;

        /* Register the notifier if/when the first device type is
           registered, to prevent the link/init ordering from messing
           us over. */
        if (!blktrans_notifier.list.next)
                register_mtd_user(&blktrans_notifier);

        mtd_table_mutex_lock();

        ret = register_blkdev(tr->major, tr->name);
        if (ret < 0) {
                printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
                       tr->name, tr->major, ret);
                mtd_table_mutex_unlock();
                return ret;
        }

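        /* A major of 0 requested dynamic allocation; record what we got. */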
        if (ret)
                tr->major = ret;

        tr->blkshift = ffs(tr->blksize) - 1;

        INIT_LIST_HEAD(&tr->devs);
        list_add(&tr->list, &blktrans_majors);

        mtd_for_each_device(mtd)
                if (mtd->type != MTD_ABSENT)
                        tr->add_mtd(tr, mtd);

        mtd_table_mutex_unlock();
        return 0;
}

int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
        struct mtd_blktrans_dev *dev, *next;

        mtd_table_mutex_lock();

        /* Remove it from the list of active majors */
        list_del(&tr->list);

        list_for_each_entry_safe(dev, next, &tr->devs, list)
                tr->remove_dev(dev);

        unregister_blkdev(tr->major, tr->name);
        mtd_table_mutex_unlock();

        BUG_ON(!list_empty(&tr->devs));
        return 0;
}

static void __exit mtd_blktrans_exit(void)
{
        /* No race here -- if someone's currently in register_mtd_blktrans
           we're screwed anyway. */
        if (blktrans_notifier.list.next)
                unregister_mtd_user(&blktrans_notifier);
}

module_exit(mtd_blktrans_exit);

EXPORT_SYMBOL_GPL(register_mtd_blktrans);
EXPORT_SYMBOL_GPL(deregister_mtd_blktrans);
EXPORT_SYMBOL_GPL(add_mtd_blktrans_dev);
EXPORT_SYMBOL_GPL(del_mtd_blktrans_dev);

MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Common interface to block layer for MTD 'translation layers'");