Lines Matching +full:out +full:- +full:volume +full:- +full:limit
1 // SPDX-License-Identifier: GPL-2.0-or-later
37 /* Maximum number of comma-separated items in the 'mtd=' parameter */
50 * struct mtd_dev_param - MTD device parameter description data structure.
69 /* UBI module parameter to enable fastmap automatically on non-fastmap images */
74 /* Slab cache for wear-leveling entries */
90 /* Protects @ubi_devices and @ubi->ref_count */
145 * ubi_volume_notify - send a volume change notification.
147 * @vol: volume description object of the changed volume
150 * This is a helper function which notifies all subscribers about a volume
151 * change event (creation, removal, re-sizing, re-naming, updating). Returns
176 * ubi_notify_all - send a notification to all volumes.
182 * notification for each volume. If @nb is %NULL, then all registered notifiers
193 mutex_lock(&ubi->device_mutex); in ubi_notify_all()
194 for (i = 0; i < ubi->vtbl_slots; i++) { in ubi_notify_all()
196 * Since the @ubi->device is locked, and we are not going to in ubi_notify_all()
197 * change @ubi->volumes, we do not have to lock in ubi_notify_all()
198 * @ubi->volumes_lock. in ubi_notify_all()
200 if (!ubi->volumes[i]) in ubi_notify_all()
203 ubi_do_get_volume_info(ubi, ubi->volumes[i], &nt.vi); in ubi_notify_all()
205 nb->notifier_call(nb, ntype, &nt); in ubi_notify_all()
211 mutex_unlock(&ubi->device_mutex); in ubi_notify_all()
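
The fragments above show the core of ubi_notify_all(): take the device mutex, walk the volume-table slots, skip empty ones, and invoke the notifier for each populated volume. Below is a minimal user-space sketch of that walk, assuming a simplified volume array and a plain function pointer in place of the kernel's notifier_block machinery; the demo_* names are illustrative, not kernel API.

        /* Illustrative sketch of the ubi_notify_all() walk: iterate the volume
         * slots under a lock, skip empty slots, call a notifier for each
         * populated volume.  Simplified stand-ins, not the kernel structures. */
        #include <pthread.h>
        #include <stdio.h>

        #define DEMO_VTBL_SLOTS 4

        struct demo_volume { int vol_id; const char *name; };

        struct demo_ubi {
                pthread_mutex_t device_mutex;
                struct demo_volume *volumes[DEMO_VTBL_SLOTS];
        };

        typedef void (*demo_notifier)(int ntype, const struct demo_volume *vol);

        static int demo_notify_all(struct demo_ubi *ubi, int ntype, demo_notifier nb)
        {
                int i, count = 0;

                pthread_mutex_lock(&ubi->device_mutex);
                for (i = 0; i < DEMO_VTBL_SLOTS; i++) {
                        if (!ubi->volumes[i])   /* empty slot - nothing to report */
                                continue;
                        nb(ntype, ubi->volumes[i]);
                        count++;
                }
                pthread_mutex_unlock(&ubi->device_mutex);
                return count;   /* number of notifications sent */
        }

        static void print_event(int ntype, const struct demo_volume *vol)
        {
                printf("event %d for volume %d (%s)\n", ntype, vol->vol_id, vol->name);
        }

        int main(void)
        {
                struct demo_volume v0 = { 0, "rootfs" }, v2 = { 2, "data" };
                struct demo_ubi ubi = { PTHREAD_MUTEX_INITIALIZER,
                                        { &v0, NULL, &v2, NULL } };

                return demo_notify_all(&ubi, 1 /* e.g. "volume added" */, print_event) ? 0 : 1;
        }
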
217 * ubi_enumerate_volumes - send "add" notification for all existing volumes.
221 * %UBI_VOLUME_ADDED notification for each volume. If @nb is %NULL, then all
245 * ubi_get_device - get UBI device.
260 ubi_assert(ubi->ref_count >= 0); in ubi_get_device()
261 ubi->ref_count += 1; in ubi_get_device()
262 get_device(&ubi->dev); in ubi_get_device()
270 * ubi_put_device - drop an UBI device reference.
276 ubi->ref_count -= 1; in ubi_put_device()
277 put_device(&ubi->dev); in ubi_put_device()
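
ubi_get_device() and ubi_put_device() pair a per-device reference count with a reference on the embedded struct device (get_device()/put_device()), both manipulated under the devices lock. A hedged user-space sketch of that pairing, with a plain counter standing in for the device reference; all demo_* names are illustrative only.

        /* Minimal sketch of the get/put pairing used by ubi_get_device() and
         * ubi_put_device(): bump a reference count and pin the device object
         * under a lock, drop both on put. */
        #include <assert.h>
        #include <pthread.h>

        struct demo_device { int pinned; };

        struct demo_ubi {
                int ref_count;
                struct demo_device dev;
        };

        static pthread_mutex_t demo_devices_lock = PTHREAD_MUTEX_INITIALIZER;

        static struct demo_ubi *demo_get_device(struct demo_ubi *ubi)
        {
                pthread_mutex_lock(&demo_devices_lock);
                assert(ubi->ref_count >= 0);
                ubi->ref_count += 1;    /* logical UBI reference */
                ubi->dev.pinned += 1;   /* stands in for get_device(&ubi->dev) */
                pthread_mutex_unlock(&demo_devices_lock);
                return ubi;
        }

        static void demo_put_device(struct demo_ubi *ubi)
        {
                pthread_mutex_lock(&demo_devices_lock);
                ubi->ref_count -= 1;
                ubi->dev.pinned -= 1;   /* stands in for put_device(&ubi->dev) */
                pthread_mutex_unlock(&demo_devices_lock);
        }

        int main(void)
        {
                struct demo_ubi ubi = { 0, { 0 } };

                demo_get_device(&ubi);
                demo_put_device(&ubi);
                return ubi.ref_count;   /* 0 when gets and puts are balanced */
        }
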
282 * ubi_get_by_major - get UBI device by character device major number.
296 if (ubi && MAJOR(ubi->cdev.dev) == major) { in ubi_get_by_major()
297 ubi_assert(ubi->ref_count >= 0); in ubi_get_by_major()
298 ubi->ref_count += 1; in ubi_get_by_major()
299 get_device(&ubi->dev); in ubi_get_by_major()
310 * ubi_major2num - get UBI device number by character device major number.
314 * device was not found, this function returns -ENODEV, otherwise the UBI device
319 int i, ubi_num = -ENODEV; in ubi_major2num()
325 if (ubi && MAJOR(ubi->cdev.dev) == major) { in ubi_major2num()
326 ubi_num = ubi->ubi_num; in ubi_major2num()
347 * 'ubi_get_device()' will return -ENODEV and we fail. in dev_attribute_show()
350 * we still can use 'ubi->ubi_num'. in dev_attribute_show()
355 ret = sprintf(buf, "%d\n", ubi->leb_size); in dev_attribute_show()
357 ret = sprintf(buf, "%d\n", ubi->avail_pebs); in dev_attribute_show()
359 ret = sprintf(buf, "%d\n", ubi->good_peb_count); in dev_attribute_show()
361 ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT); in dev_attribute_show()
363 ret = sprintf(buf, "%d\n", ubi->max_ec); in dev_attribute_show()
365 ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs); in dev_attribute_show()
367 ret = sprintf(buf, "%d\n", ubi->bad_peb_count); in dev_attribute_show()
369 ret = sprintf(buf, "%d\n", ubi->vtbl_slots); in dev_attribute_show()
371 ret = sprintf(buf, "%d\n", ubi->min_io_size); in dev_attribute_show()
373 ret = sprintf(buf, "%d\n", ubi->thread_enabled); in dev_attribute_show()
375 ret = sprintf(buf, "%d\n", ubi->mtd->index); in dev_attribute_show()
377 ret = sprintf(buf, "%d\n", ubi->ro_mode); in dev_attribute_show()
379 ret = -EINVAL; in dev_attribute_show()
409 * kill_volumes - destroy all user volumes.
416 for (i = 0; i < ubi->vtbl_slots; i++) in kill_volumes()
417 if (ubi->volumes[i]) in kill_volumes()
418 ubi_free_volume(ubi, ubi->volumes[i]); in kill_volumes()
422 * uif_init - initialize user interfaces for an UBI device.
437 sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num); in uif_init()
441 * dynamically. Major numbers of volume character devices are in uif_init()
444 * volume character devices start from 1. Thus, we allocate one major in uif_init()
445 * number and ubi->vtbl_slots + 1 minor numbers. in uif_init()
447 err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name); in uif_init()
453 ubi->dev.devt = dev; in uif_init()
456 cdev_init(&ubi->cdev, &ubi_cdev_operations); in uif_init()
457 dbg_gen("%s major is %u", ubi->ubi_name, MAJOR(dev)); in uif_init()
458 ubi->cdev.owner = THIS_MODULE; in uif_init()
460 dev_set_name(&ubi->dev, UBI_NAME_STR "%d", ubi->ubi_num); in uif_init()
461 err = cdev_device_add(&ubi->cdev, &ubi->dev); in uif_init()
465 for (i = 0; i < ubi->vtbl_slots; i++) in uif_init()
466 if (ubi->volumes[i]) { in uif_init()
467 err = ubi_add_volume(ubi, ubi->volumes[i]); in uif_init()
469 ubi_err(ubi, "cannot add volume %d", i); in uif_init()
478 cdev_device_del(&ubi->cdev, &ubi->dev); in uif_init()
480 unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1); in uif_init()
482 ubi->ubi_name, err); in uif_init()
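
uif_init() reserves one character-device major with ubi->vtbl_slots + 1 minors: minor 0 is the ubiX device node itself and, per the comment above, the volume character devices start from minor 1. The small sketch below only illustrates that minor-number layout; the MKDEV-style macros mirror the kernel's 12/20-bit dev_t split, and the concrete major and slot count are made-up example values.

        /* Sketch of the uif_init() device-number layout: one major,
         * vtbl_slots + 1 minors, minor 0 for the UBI device and minors
         * 1..vtbl_slots for the volume character devices. */
        #include <stdio.h>

        #define DEMO_MKDEV(ma, mi)      (((ma) << 20) | (mi))
        #define DEMO_MAJOR(dev)         ((dev) >> 20)
        #define DEMO_MINOR(dev)         ((dev) & ((1U << 20) - 1))

        int main(void)
        {
                unsigned int major = 510;       /* as if returned by alloc_chrdev_region() */
                int vtbl_slots = 128;           /* volume-table slots of this UBI device */
                unsigned int dev;
                int vol_id;

                printf("reserve %d minors under major %u\n", vtbl_slots + 1, major);

                /* Minor 0 is the ubiX character device itself... */
                dev = DEMO_MKDEV(major, 0);
                printf("ubi device : %u:%u\n", DEMO_MAJOR(dev), DEMO_MINOR(dev));

                /* ...and volume ubiX_Y gets minor Y + 1. */
                for (vol_id = 0; vol_id < 3; vol_id++) {
                        dev = DEMO_MKDEV(major, vol_id + 1);
                        printf("volume %d   : %u:%u\n", vol_id,
                               DEMO_MAJOR(dev), DEMO_MINOR(dev));
                }
                return 0;
        }
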
487 * uif_close - close user interfaces for an UBI device.
490 * Note, since this function un-registers UBI volume device objects (@vol->dev),
497 cdev_device_del(&ubi->cdev, &ubi->dev); in uif_close()
498 unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1); in uif_close()
502 * ubi_free_volumes_from - free volumes from specific index.
504  * @from: the first volume slot index to start freeing from.
510 for (i = from; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) { in ubi_free_volumes_from()
511 if (!ubi->volumes[i]) in ubi_free_volumes_from()
513 ubi_eba_replace_table(ubi->volumes[i], NULL); in ubi_free_volumes_from()
514 ubi_fastmap_destroy_checkmap(ubi->volumes[i]); in ubi_free_volumes_from()
515 kfree(ubi->volumes[i]); in ubi_free_volumes_from()
516 ubi->volumes[i] = NULL; in ubi_free_volumes_from()
521 * ubi_free_all_volumes - free all volumes.
530 * ubi_free_internal_volumes - free internal volumes.
535 ubi_free_volumes_from(ubi, ubi->vtbl_slots); in ubi_free_internal_volumes()
540 int limit, device_pebs; in get_bad_peb_limit() local
547 * limit if it is supported by the device. in get_bad_peb_limit()
549 limit = mtd_max_bad_blocks(ubi->mtd, 0, ubi->mtd->size); in get_bad_peb_limit()
550 if (limit < 0) in get_bad_peb_limit()
552 return limit; in get_bad_peb_limit()
562 * the MTD partition we are attaching (ubi->mtd). in get_bad_peb_limit()
564 device_size = mtd_get_device_size(ubi->mtd); in get_bad_peb_limit()
565 device_pebs = mtd_div_by_eb(device_size, ubi->mtd); in get_bad_peb_limit()
566 limit = mult_frac(device_pebs, max_beb_per1024, 1024); in get_bad_peb_limit()
569 if (mult_frac(limit, 1024, max_beb_per1024) < device_pebs) in get_bad_peb_limit()
570 limit += 1; in get_bad_peb_limit()
572 return limit; in get_bad_peb_limit()
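
When the MTD driver cannot report its own bad-block maximum, get_bad_peb_limit() falls back to the max_beb_per1024 heuristic visible above: scale the whole device's eraseblock count by max_beb_per1024/1024 and round up. A self-contained sketch of that arithmetic; the device geometry values are made up for illustration.

        /* Sketch of the max_beb_per1024 fallback in get_bad_peb_limit(): the
         * bad-PEB reserve is device_pebs * max_beb_per1024 / 1024, rounded up,
         * computed over the whole flash device, not just the attached partition. */
        #include <stdio.h>

        /* Equivalent of the kernel's mult_frac(): (x * n) / d without
         * intermediate overflow for the ranges used here. */
        static long long mult_frac_ll(long long x, long long n, long long d)
        {
                long long q = x / d, r = x % d;

                return q * n + r * n / d;
        }

        static int bad_peb_limit(long long device_size, int peb_size, int max_beb_per1024)
        {
                long long device_pebs = device_size / peb_size;
                long long limit = mult_frac_ll(device_pebs, max_beb_per1024, 1024);

                /* Round up if 1024 does not divide device_pebs * max_beb_per1024. */
                if (mult_frac_ll(limit, 1024, max_beb_per1024) < device_pebs)
                        limit += 1;
                return (int)limit;
        }

        int main(void)
        {
                /* Example: 256 MiB NAND, 128 KiB eraseblocks, default 20 per 1024. */
                long long device_size = 256LL * 1024 * 1024;
                int peb_size = 128 * 1024;

                printf("bad PEB limit: %d\n", bad_peb_limit(device_size, peb_size, 20));
                return 0;
        }
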
576 * io_init - initialize I/O sub-system for a given UBI device.
580 * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are
582 * o EC header is always at offset zero - this cannot be changed;
584 * aligned to @io->hdrs_min_io_size;
586 * @io->min_io_size
596 if (ubi->mtd->numeraseregions != 0) { in io_init()
600 * characteristics. It looks like mostly multi-region flashes in io_init()
607 return -EINVAL; in io_init()
610 if (ubi->vid_hdr_offset < 0) in io_init()
611 return -EINVAL; in io_init()
618 ubi->peb_size = ubi->mtd->erasesize; in io_init()
619 ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd); in io_init()
620 ubi->flash_size = ubi->mtd->size; in io_init()
622 if (mtd_can_have_bb(ubi->mtd)) { in io_init()
623 ubi->bad_allowed = 1; in io_init()
624 ubi->bad_peb_limit = get_bad_peb_limit(ubi, max_beb_per1024); in io_init()
627 if (ubi->mtd->type == MTD_NORFLASH) { in io_init()
628 ubi_assert(ubi->mtd->writesize == 1); in io_init()
629 ubi->nor_flash = 1; in io_init()
632 ubi->min_io_size = ubi->mtd->writesize; in io_init()
633 ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft; in io_init()
640 if (!is_power_of_2(ubi->min_io_size)) { in io_init()
642 ubi->min_io_size); in io_init()
643 return -EINVAL; in io_init()
646 ubi_assert(ubi->hdrs_min_io_size > 0); in io_init()
647 ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size); in io_init()
648 ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0); in io_init()
650 ubi->max_write_size = ubi->mtd->writebufsize; in io_init()
655 if (ubi->max_write_size < ubi->min_io_size || in io_init()
656 ubi->max_write_size % ubi->min_io_size || in io_init()
657 !is_power_of_2(ubi->max_write_size)) { in io_init()
659 ubi->max_write_size, ubi->min_io_size); in io_init()
660 return -EINVAL; in io_init()
664 ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size); in io_init()
665 ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size); in io_init()
667 dbg_gen("min_io_size %d", ubi->min_io_size); in io_init()
668 dbg_gen("max_write_size %d", ubi->max_write_size); in io_init()
669 dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size); in io_init()
670 dbg_gen("ec_hdr_alsize %d", ubi->ec_hdr_alsize); in io_init()
671 dbg_gen("vid_hdr_alsize %d", ubi->vid_hdr_alsize); in io_init()
673 if (ubi->vid_hdr_offset == 0) in io_init()
675 ubi->vid_hdr_offset = ubi->vid_hdr_aloffset = in io_init()
676 ubi->ec_hdr_alsize; in io_init()
678 ubi->vid_hdr_aloffset = ubi->vid_hdr_offset & in io_init()
679 ~(ubi->hdrs_min_io_size - 1); in io_init()
680 ubi->vid_hdr_shift = ubi->vid_hdr_offset - in io_init()
681 ubi->vid_hdr_aloffset; in io_init()
685 ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE; in io_init()
686 ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size); in io_init()
688 dbg_gen("vid_hdr_offset %d", ubi->vid_hdr_offset); in io_init()
689 dbg_gen("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset); in io_init()
690 dbg_gen("vid_hdr_shift %d", ubi->vid_hdr_shift); in io_init()
691 dbg_gen("leb_start %d", ubi->leb_start); in io_init()
693 /* The shift must be aligned to 32-bit boundary */ in io_init()
694 if (ubi->vid_hdr_shift % 4) { in io_init()
696 ubi->vid_hdr_shift); in io_init()
697 return -EINVAL; in io_init()
701 if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE || in io_init()
702 ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE || in io_init()
703 ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE || in io_init()
704 ubi->leb_start & (ubi->min_io_size - 1)) { in io_init()
706 ubi->vid_hdr_offset, ubi->leb_start); in io_init()
707 return -EINVAL; in io_init()
714 ubi->max_erroneous = ubi->peb_count / 10; in io_init()
715 if (ubi->max_erroneous < 16) in io_init()
716 ubi->max_erroneous = 16; in io_init()
717 dbg_gen("max_erroneous %d", ubi->max_erroneous); in io_init()
722 * read-only mode. in io_init()
724 if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) { in io_init()
725 ubi_warn(ubi, "EC and VID headers are in the same minimal I/O unit, switch to read-only mode"); in io_init()
726 ubi->ro_mode = 1; in io_init()
729 ubi->leb_size = ubi->peb_size - ubi->leb_start; in io_init()
731 if (!(ubi->mtd->flags & MTD_WRITEABLE)) { in io_init()
732 ubi_msg(ubi, "MTD device %d is write-protected, attach in read-only mode", in io_init()
733 ubi->mtd->index); in io_init()
734 ubi->ro_mode = 1; in io_init()
738 * Note, ideally, we have to initialize @ubi->bad_peb_count here. But in io_init()
740 * over all physical eraseblocks and invoke mtd->block_is_bad() for in io_init()
741 * each physical eraseblock. So, we leave @ubi->bad_peb_count in io_init()
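
The io_init() fragments above derive the on-flash layout: both headers are aligned to the minimal I/O unit usable for headers, the VID header offset is split into an aligned offset plus a shift, and the LEB data area starts after the VID header, aligned up to the full minimal I/O unit. The sketch below reproduces that arithmetic in user space for one plausible sub-page NAND geometry; the geometry numbers are an example, and the 64-byte header sizes stand in for the kernel's UBI_EC_HDR_SIZE/UBI_VID_HDR_SIZE.

        /* Sketch of the io_init() layout arithmetic: align the EC and VID
         * headers to the header I/O unit, split the VID header offset into an
         * aligned part plus a shift, and align the start of LEB data to the
         * full minimal I/O unit. */
        #include <stdio.h>

        #define DEMO_ALIGN(x, a)        (((x) + (a) - 1) & ~((a) - 1))

        #define DEMO_EC_HDR_SIZE        64      /* size of the EC header */
        #define DEMO_VID_HDR_SIZE       64      /* size of the VID header */

        int main(void)
        {
                int peb_size = 128 * 1024;      /* erase block */
                int min_io_size = 2048;         /* NAND page */
                int hdrs_min_io_size = 512;     /* sub-page, if the chip supports it */

                int ec_hdr_alsize  = DEMO_ALIGN(DEMO_EC_HDR_SIZE, hdrs_min_io_size);
                int vid_hdr_alsize = DEMO_ALIGN(DEMO_VID_HDR_SIZE, hdrs_min_io_size);

                /* Default placement: VID header right after the aligned EC header. */
                int vid_hdr_offset   = ec_hdr_alsize;
                int vid_hdr_aloffset = vid_hdr_offset & ~(hdrs_min_io_size - 1);
                int vid_hdr_shift    = vid_hdr_offset - vid_hdr_aloffset;

                /* LEB data starts after the VID header, aligned to a full I/O unit. */
                int leb_start = DEMO_ALIGN(vid_hdr_offset + DEMO_VID_HDR_SIZE, min_io_size);
                int leb_size  = peb_size - leb_start;

                printf("ec_hdr_alsize=%d vid_hdr_alsize=%d\n", ec_hdr_alsize, vid_hdr_alsize);
                printf("vid_hdr_offset=%d aloffset=%d shift=%d\n",
                       vid_hdr_offset, vid_hdr_aloffset, vid_hdr_shift);
                printf("leb_start=%d leb_size=%d (overhead %d bytes per PEB)\n",
                       leb_start, leb_size, peb_size - leb_size);

                /* Same read-only check as above: if both headers fit into one
                 * header I/O unit, they cannot be written independently. */
                if (vid_hdr_offset + DEMO_VID_HDR_SIZE <= hdrs_min_io_size)
                        printf("EC and VID headers share one I/O unit -> read-only\n");
                return 0;
        }
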
749 * autoresize - re-size the volume which has the "auto-resize" flag set.
751 * @vol_id: ID of the volume to re-size
753 * This function re-sizes the volume marked by the %UBI_VTBL_AUTORESIZE_FLG in
754 * the volume table to the largest possible size. See comments in ubi-header.h
761 struct ubi_volume *vol = ubi->volumes[vol_id]; in autoresize()
762 int err, old_reserved_pebs = vol->reserved_pebs; in autoresize()
764 if (ubi->ro_mode) { in autoresize()
765 ubi_warn(ubi, "skip auto-resize because of R/O mode"); in autoresize()
770 * Clear the auto-resize flag in the volume in-memory copy of the in autoresize()
771 * volume table, and 'ubi_resize_volume()' will propagate this change in autoresize()
774 ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG; in autoresize()
776 if (ubi->avail_pebs == 0) { in autoresize()
780 * No available PEBs to re-size the volume, clear the flag on in autoresize()
783 vtbl_rec = ubi->vtbl[vol_id]; in autoresize()
786 ubi_err(ubi, "cannot clean auto-resize flag for volume %d", in autoresize()
791 old_reserved_pebs + ubi->avail_pebs); in autoresize()
793 ubi_err(ubi, "cannot auto-resize volume %d", in autoresize()
800 ubi_msg(ubi, "volume %d (\"%s\") re-sized from %d to %d LEBs", in autoresize()
801 vol_id, vol->name, old_reserved_pebs, vol->reserved_pebs); in autoresize()
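
autoresize() clears the auto-resize flag and, when spare PEBs exist, grows the volume's reservation from old_reserved_pebs to old_reserved_pebs + avail_pebs; when no PEBs are available it still clears the flag on flash so the resize is not retried on every attach. A tiny sketch of that decision, using illustrative demo_* names and a simplified flag:

        /* Sketch of the autoresize() decision: either grow the volume by all
         * currently available PEBs, or (if none are available) just clear the
         * auto-resize flag so the operation is not retried forever. */
        #include <stdio.h>

        struct demo_vol { int reserved_pebs; unsigned int flags; };

        #define DEMO_AUTORESIZE_FLG     0x01

        static void demo_autoresize(struct demo_vol *vol, int avail_pebs, int ro_mode)
        {
                int old = vol->reserved_pebs;

                if (ro_mode) {
                        printf("skip auto-resize because of R/O mode\n");
                        return;
                }

                vol->flags &= ~DEMO_AUTORESIZE_FLG;     /* never resize twice */

                if (avail_pebs == 0) {
                        printf("no free PEBs, only the flag is cleared\n");
                        return;
                }

                vol->reserved_pebs = old + avail_pebs;  /* grow to the largest size */
                printf("re-sized from %d to %d LEBs\n", old, vol->reserved_pebs);
        }

        int main(void)
        {
                struct demo_vol vol = { 100, DEMO_AUTORESIZE_FLG };

                demo_autoresize(&vol, 37, 0);
                return vol.flags;       /* 0: flag cleared */
        }
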
806 * ubi_attach_mtd_dev - attach an MTD device.
828 return -EINVAL; in ubi_attach_mtd_dev()
841 if (ubi && mtd->index == ubi->mtd->index) { in ubi_attach_mtd_dev()
843 mtd->index, i); in ubi_attach_mtd_dev()
844 return -EEXIST; in ubi_attach_mtd_dev()
849 * Make sure this MTD device is not emulated on top of an UBI volume in ubi_attach_mtd_dev()
856 if (mtd->type == MTD_UBIVOLUME) { in ubi_attach_mtd_dev()
857 pr_err("ubi: refuse attaching mtd%d - it is already emulated on top of UBI\n", in ubi_attach_mtd_dev()
858 mtd->index); in ubi_attach_mtd_dev()
859 return -EINVAL; in ubi_attach_mtd_dev()
869 if (mtd->type == MTD_MLCNANDFLASH && in ubi_attach_mtd_dev()
870 !(mtd->flags & MTD_SLC_ON_MLC_EMULATION)) { in ubi_attach_mtd_dev()
871 pr_err("ubi: refuse attaching mtd%d - MLC NAND is not supported\n", in ubi_attach_mtd_dev()
872 mtd->index); in ubi_attach_mtd_dev()
873 return -EINVAL; in ubi_attach_mtd_dev()
884 return -ENFILE; in ubi_attach_mtd_dev()
888 return -EINVAL; in ubi_attach_mtd_dev()
893 return -EEXIST; in ubi_attach_mtd_dev()
899 return -ENOMEM; in ubi_attach_mtd_dev()
901 device_initialize(&ubi->dev); in ubi_attach_mtd_dev()
902 ubi->dev.release = dev_release; in ubi_attach_mtd_dev()
903 ubi->dev.class = &ubi_class; in ubi_attach_mtd_dev()
904 ubi->dev.groups = ubi_dev_groups; in ubi_attach_mtd_dev()
906 ubi->mtd = mtd; in ubi_attach_mtd_dev()
907 ubi->ubi_num = ubi_num; in ubi_attach_mtd_dev()
908 ubi->vid_hdr_offset = vid_hdr_offset; in ubi_attach_mtd_dev()
909 ubi->autoresize_vol_id = -1; in ubi_attach_mtd_dev()
912 ubi->fm_pool.used = ubi->fm_pool.size = 0; in ubi_attach_mtd_dev()
913 ubi->fm_wl_pool.used = ubi->fm_wl_pool.size = 0; in ubi_attach_mtd_dev()
919 ubi->fm_pool.max_size = min(((int)mtd_div_by_eb(ubi->mtd->size, in ubi_attach_mtd_dev()
920 ubi->mtd) / 100) * 5, UBI_FM_MAX_POOL_SIZE); in ubi_attach_mtd_dev()
921 ubi->fm_pool.max_size = max(ubi->fm_pool.max_size, in ubi_attach_mtd_dev()
924 ubi->fm_wl_pool.max_size = ubi->fm_pool.max_size / 2; in ubi_attach_mtd_dev()
925 ubi->fm_disabled = !fm_autoconvert; in ubi_attach_mtd_dev()
929 if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd) in ubi_attach_mtd_dev()
933 ubi->fm_disabled = 1; in ubi_attach_mtd_dev()
936 ubi_msg(ubi, "default fastmap pool size: %d", ubi->fm_pool.max_size); in ubi_attach_mtd_dev()
938 ubi->fm_wl_pool.max_size); in ubi_attach_mtd_dev()
940 ubi->fm_disabled = 1; in ubi_attach_mtd_dev()
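
When fastmap support is compiled in, the attach path sizes the fastmap pool at roughly 5% of the device's PEB count, capped at UBI_FM_MAX_POOL_SIZE and raised to a minimum that the lines above do not show; the wear-leveling pool is half of the main pool. The sketch below reproduces that sizing; both pool-size constants are assumed stand-ins for the kernel's limits.

        /* Sketch of the fastmap pool sizing done at attach time: about 5% of
         * the device's PEBs, clamped between an assumed minimum and maximum,
         * with the WL pool set to half of the main pool. */
        #include <stdio.h>

        #define DEMO_FM_MAX_POOL_SIZE   256     /* assumed cap */
        #define DEMO_FM_MIN_POOL_SIZE   8       /* assumed floor (line not in this listing) */

        static int min_int(int a, int b) { return a < b ? a : b; }
        static int max_int(int a, int b) { return a > b ? a : b; }

        int main(void)
        {
                long long device_size = 1024LL * 1024 * 1024;   /* 1 GiB flash */
                int peb_size = 128 * 1024;
                int peb_count = (int)(device_size / peb_size);

                int fm_pool_max = min_int((peb_count / 100) * 5, DEMO_FM_MAX_POOL_SIZE);

                fm_pool_max = max_int(fm_pool_max, DEMO_FM_MIN_POOL_SIZE);

                printf("PEBs: %d, fastmap pool: %d, WL pool: %d\n",
                       peb_count, fm_pool_max, fm_pool_max / 2);
                return 0;
        }
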
942 mutex_init(&ubi->buf_mutex); in ubi_attach_mtd_dev()
943 mutex_init(&ubi->ckvol_mutex); in ubi_attach_mtd_dev()
944 mutex_init(&ubi->device_mutex); in ubi_attach_mtd_dev()
945 spin_lock_init(&ubi->volumes_lock); in ubi_attach_mtd_dev()
946 init_rwsem(&ubi->fm_protect); in ubi_attach_mtd_dev()
947 init_rwsem(&ubi->fm_eba_sem); in ubi_attach_mtd_dev()
949 ubi_msg(ubi, "attaching mtd%d", mtd->index); in ubi_attach_mtd_dev()
955 err = -ENOMEM; in ubi_attach_mtd_dev()
956 ubi->peb_buf = vmalloc(ubi->peb_size); in ubi_attach_mtd_dev()
957 if (!ubi->peb_buf) in ubi_attach_mtd_dev()
961 ubi->fm_size = ubi_calc_fm_size(ubi); in ubi_attach_mtd_dev()
962 ubi->fm_buf = vzalloc(ubi->fm_size); in ubi_attach_mtd_dev()
963 if (!ubi->fm_buf) in ubi_attach_mtd_dev()
969 mtd->index, err); in ubi_attach_mtd_dev()
973 if (ubi->autoresize_vol_id != -1) { in ubi_attach_mtd_dev()
974 err = autoresize(ubi, ubi->autoresize_vol_id); in ubi_attach_mtd_dev()
987 ubi->bgt_thread = kthread_create(ubi_thread, ubi, "%s", ubi->bgt_name); in ubi_attach_mtd_dev()
988 if (IS_ERR(ubi->bgt_thread)) { in ubi_attach_mtd_dev()
989 err = PTR_ERR(ubi->bgt_thread); in ubi_attach_mtd_dev()
991 ubi->bgt_name, err); in ubi_attach_mtd_dev()
996 mtd->index, mtd->name, ubi->flash_size >> 20); in ubi_attach_mtd_dev()
998 ubi->peb_size, ubi->peb_size >> 10, ubi->leb_size); in ubi_attach_mtd_dev()
999 ubi_msg(ubi, "min./max. I/O unit sizes: %d/%d, sub-page size %d", in ubi_attach_mtd_dev()
1000 ubi->min_io_size, ubi->max_write_size, ubi->hdrs_min_io_size); in ubi_attach_mtd_dev()
1002 ubi->vid_hdr_offset, ubi->vid_hdr_aloffset, ubi->leb_start); in ubi_attach_mtd_dev()
1004 ubi->good_peb_count, ubi->bad_peb_count, ubi->corr_peb_count); in ubi_attach_mtd_dev()
1005 ubi_msg(ubi, "user volume: %d, internal volumes: %d, max. volumes count: %d", in ubi_attach_mtd_dev()
1006 ubi->vol_count - UBI_INT_VOL_COUNT, UBI_INT_VOL_COUNT, in ubi_attach_mtd_dev()
1007 ubi->vtbl_slots); in ubi_attach_mtd_dev()
1009 ubi->max_ec, ubi->mean_ec, CONFIG_MTD_UBI_WL_THRESHOLD, in ubi_attach_mtd_dev()
1010 ubi->image_seq); in ubi_attach_mtd_dev()
1012 ubi->avail_pebs, ubi->rsvd_pebs, ubi->beb_rsvd_pebs); in ubi_attach_mtd_dev()
1016 * checks @ubi->thread_enabled. Otherwise we may fail to wake it up. in ubi_attach_mtd_dev()
1018 spin_lock(&ubi->wl_lock); in ubi_attach_mtd_dev()
1019 ubi->thread_enabled = 1; in ubi_attach_mtd_dev()
1020 wake_up_process(ubi->bgt_thread); in ubi_attach_mtd_dev()
1021 spin_unlock(&ubi->wl_lock); in ubi_attach_mtd_dev()
1034 vfree(ubi->vtbl); in ubi_attach_mtd_dev()
1036 vfree(ubi->peb_buf); in ubi_attach_mtd_dev()
1037 vfree(ubi->fm_buf); in ubi_attach_mtd_dev()
1038 put_device(&ubi->dev); in ubi_attach_mtd_dev()
1043 * ubi_detach_mtd_dev - detach an MTD device.
1048 * underlying MTD device. Returns zero in case of success and %-EBUSY if the
1049 * UBI device is busy and cannot be destroyed, and %-EINVAL if it does not
1060 return -EINVAL; in ubi_detach_mtd_dev()
1064 return -EINVAL; in ubi_detach_mtd_dev()
1067 put_device(&ubi->dev); in ubi_detach_mtd_dev()
1068 ubi->ref_count -= 1; in ubi_detach_mtd_dev()
1069 if (ubi->ref_count) { in ubi_detach_mtd_dev()
1072 return -EBUSY; in ubi_detach_mtd_dev()
1076 ubi->ubi_name, ubi->ref_count); in ubi_detach_mtd_dev()
1081 ubi_assert(ubi_num == ubi->ubi_num); in ubi_detach_mtd_dev()
1083 ubi_msg(ubi, "detaching mtd%d", ubi->mtd->index); in ubi_detach_mtd_dev()
1096 if (ubi->bgt_thread) in ubi_detach_mtd_dev()
1097 kthread_stop(ubi->bgt_thread); in ubi_detach_mtd_dev()
1100 cancel_work_sync(&ubi->fm_work); in ubi_detach_mtd_dev()
1107 vfree(ubi->vtbl); in ubi_detach_mtd_dev()
1108 vfree(ubi->peb_buf); in ubi_detach_mtd_dev()
1109 vfree(ubi->fm_buf); in ubi_detach_mtd_dev()
1110 ubi_msg(ubi, "mtd%d is detached", ubi->mtd->index); in ubi_detach_mtd_dev()
1111 put_mtd_device(ubi->mtd); in ubi_detach_mtd_dev()
1112 put_device(&ubi->dev); in ubi_detach_mtd_dev()
1117 * open_mtd_by_chdev - open an MTD device by its character device node path.
1142 return ERR_PTR(-EINVAL); in open_mtd_by_chdev()
1151 return ERR_PTR(-EINVAL); in open_mtd_by_chdev()
1157 * open_mtd_device - open MTD device by name, character device path, or number.
1179 if (PTR_ERR(mtd) == -ENODEV) in open_mtd_device()
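
open_mtd_device() accepts an MTD number, an MTD name, or a character-device path; the -ENODEV check in the fragment above is the fallthrough from a failed by-name lookup to the by-path one. Below is a hedged user-space sketch of that dispatch order only; the demo_open_by_* helpers are placeholders, not the kernel's get_mtd_device()/get_mtd_device_nm()/open_mtd_by_chdev() calls.

        /* Sketch of the open_mtd_device() dispatch: a purely numeric string
         * selects an MTD by number, otherwise the string is tried as a device
         * name and, if that lookup reports "no such device", as a path. */
        #include <errno.h>
        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>

        static int demo_open_by_number(int num)        { printf("by number: mtd%d\n", num); return 0; }
        static int demo_open_by_path(const char *path) { printf("by path: %s\n", path); return 0; }

        static int demo_open_by_name(const char *name)
        {
                /* Pretend only a device called "content" exists. */
                if (strcmp(name, "content") != 0)
                        return -ENODEV;
                printf("by name: %s\n", name);
                return 0;
        }

        static int demo_open_mtd_device(const char *mtd_dev)
        {
                char *end;
                long num = strtol(mtd_dev, &end, 10);

                if (*mtd_dev != '\0' && *end == '\0')
                        return demo_open_by_number((int)num);   /* "0", "4", ... */

                if (demo_open_by_name(mtd_dev) == -ENODEV)
                        return demo_open_by_path(mtd_dev);      /* "/dev/mtd1", ... */
                return 0;
        }

        int main(void)
        {
                demo_open_mtd_device("4");
                demo_open_mtd_device("content");
                demo_open_mtd_device("/dev/mtd1");
                return 0;
        }
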
1199 return -EINVAL; in ubi_init()
1210 goto out; in ubi_init()
1217 err = -ENOMEM; in ubi_init()
1233 mtd = open_mtd_device(p->name); in ubi_init()
1237 p->name, err); in ubi_init()
1238 /* See comment below re-ubi_is_module(). */ in ubi_init()
1245 err = ubi_attach_mtd_dev(mtd, p->ubi_num, in ubi_init()
1246 p->vid_hdr_offs, p->max_beb_per1024); in ubi_init()
1250 mtd->index); in ubi_init()
1255 * However, later on it was found out that this in ubi_init()
1262 * non-module case, but preserved the old behavior for in ubi_init()
1275 /* See comment above re-ubi_is_module(). */ in ubi_init()
1286 ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1); in ubi_init()
1294 out: in ubi_init()
1310 ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1); in ubi_exit()
1321 * bytes_str_to_int - convert a number of bytes string into an integer.
1335 return -EINVAL; in bytes_str_to_int()
1353 return -EINVAL; in bytes_str_to_int()
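
Only the error returns of bytes_str_to_int() appear in this listing; the helper turns a byte-count string, optionally carrying a binary-unit suffix, into an integer. The user-space sketch below shows that kind of parsing, assuming the conventional KiB/MiB/GiB suffixes; treat the exact suffix set as an assumption rather than the kernel's definitive behaviour.

        /* Sketch of the kind of parsing a bytes_str_to_int() helper performs:
         * read a decimal number and scale it by an optional KiB/MiB/GiB
         * suffix, rejecting anything else. */
        #include <errno.h>
        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>

        static long long demo_bytes_str_to_int(const char *str)
        {
                char *endp;
                long long result = strtoll(str, &endp, 0);

                if (endp == str || result < 0)
                        return -EINVAL;         /* not a number at all */

                if (*endp == '\0')
                        return result;          /* plain byte count */
                if (strcmp(endp, "KiB") == 0)
                        return result * 1024;
                if (strcmp(endp, "MiB") == 0)
                        return result * 1024 * 1024;
                if (strcmp(endp, "GiB") == 0)
                        return result * 1024 * 1024 * 1024;

                return -EINVAL;                 /* unknown suffix */
        }

        int main(void)
        {
                printf("%lld\n", demo_bytes_str_to_int("2048"));
                printf("%lld\n", demo_bytes_str_to_int("512KiB"));
                printf("%lld\n", demo_bytes_str_to_int("bogus"));
                return 0;
        }
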
1360 * ubi_mtd_param_parse - parse the 'mtd=' UBI parameter.
1376 return -EINVAL; in ubi_mtd_param_parse()
1381 return -EINVAL; in ubi_mtd_param_parse()
1388 return -EINVAL; in ubi_mtd_param_parse()
1392 pr_warn("UBI warning: empty 'mtd=' parameter - ignored\n"); in ubi_mtd_param_parse()
1399 if (buf[len - 1] == '\n') in ubi_mtd_param_parse()
1400 buf[len - 1] = '\0'; in ubi_mtd_param_parse()
1407 return -EINVAL; in ubi_mtd_param_parse()
1411 strcpy(&p->name[0], tokens[0]); in ubi_mtd_param_parse()
1415 p->vid_hdr_offs = bytes_str_to_int(token); in ubi_mtd_param_parse()
1417 if (p->vid_hdr_offs < 0) in ubi_mtd_param_parse()
1418 return p->vid_hdr_offs; in ubi_mtd_param_parse()
1423 int err = kstrtoint(token, 10, &p->max_beb_per1024); in ubi_mtd_param_parse()
1428 return -EINVAL; in ubi_mtd_param_parse()
1434 int err = kstrtoint(token, 10, &p->ubi_num); in ubi_mtd_param_parse()
1439 return -EINVAL; in ubi_mtd_param_parse()
1442 p->ubi_num = UBI_DEV_NUM_AUTO; in ubi_mtd_param_parse()
1457 "Example 1: mtd=/dev/mtd0 - attach MTD device /dev/mtd0.\n"
1458 …"Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offs…
1459 …"Example 3: mtd=/dev/mtd1,0,25 - attach MTD device /dev/mtd1 using default VID header offset and r…
1460 …"Example 4: mtd=/dev/mtd1,0,0,5 - attach MTD device /dev/mtd1 to UBI 5 and using default values fo…
1469 MODULE_DESCRIPTION("UBI - Unsorted Block Images");