Lines Matching +full:alloc +full:- +full:ranges

1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2017-2018 Intel Corporation. All rights reserved. */
10 #include "dax-private.h"
26 * We only ever expect to handle device-dax instances, i.e. the in dax_bus_uevent()
44 list_for_each_entry(dax_id, &dax_drv->ids, list) in __dax_match_id()
45 if (sysfs_streq(dax_id->dev_name, dev_name)) in __dax_match_id()
78 return -EINVAL; in do_id_store()
81 return -EINVAL; in do_id_store()
89 strncpy(dax_id->dev_name, buf, DAX_NAME_LEN); in do_id_store()
90 list_add(&dax_id->list, &dax_drv->ids); in do_id_store()
92 rc = -ENOMEM; in do_id_store()
96 list_del(&dax_id->list); in do_id_store()
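
The do_id_store() path above keeps driver-claimed device names on dax_drv->ids, and __dax_match_id() compares them with sysfs_streq(), which treats the trailing newline that `echo name > new_id` appends as equivalent to end-of-string. A minimal userspace sketch of that comparison semantic; streq_sysfs() is an illustrative stand-in, not the kernel's implementation:

    /* Hedged model of sysfs_streq(): strings are equal if they match,
     * allowing one side to carry a single trailing '\n'. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool streq_sysfs(const char *s1, const char *s2)
    {
            while (*s1 && *s1 == *s2) {
                    s1++;
                    s2++;
            }
            if (*s1 == *s2)
                    return true;
            /* allow "name\n" (from echo) to match "name" */
            if (*s1 == '\n' && s1[1] == '\0' && *s2 == '\0')
                    return true;
            if (*s2 == '\n' && s2[1] == '\0' && *s1 == '\0')
                    return true;
            return false;
    }

    int main(void)
    {
            printf("%d\n", streq_sysfs("dax0.0\n", "dax0.0")); /* 1 */
            printf("%d\n", streq_sysfs("dax0.0", "dax0.1"));   /* 0 */
            return 0;
    }
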
136 return (dax_region->res.flags & IORESOURCE_DAX_STATIC) != 0; in is_static()
144 device_lock_assert(&dev_dax->dev); in dev_dax_size()
146 for (i = 0; i < dev_dax->nr_range; i++) in dev_dax_size()
147 size += range_len(&dev_dax->ranges[i].range); in dev_dax_size()
154 struct dax_device_driver *dax_drv = to_dax_drv(dev->driver); in dax_bus_probe()
156 struct dax_region *dax_region = dev_dax->region; in dax_bus_probe()
159 if (dev_dax_size(dev_dax) == 0 || dev_dax->id < 0) in dax_bus_probe()
160 return -ENXIO; in dax_bus_probe()
162 rc = dax_drv->probe(dev_dax); in dax_bus_probe()
171 if (dax_region->seed == dev) in dax_bus_probe()
172 dax_region->seed = NULL; in dax_bus_probe()
179 struct dax_device_driver *dax_drv = to_dax_drv(dev->driver); in dax_bus_remove()
182 return dax_drv->remove(dev_dax); in dax_bus_remove()
199 * All but the 'device-dax' driver, which has 'match_always' in dax_bus_match()
202 if (dax_drv->match_always) in dax_bus_match()
218 return sprintf(buf, "%d\n", dax_region->id); in id_show()
228 resource_size(&dax_region->res)); in region_size_show()
238 return sprintf(buf, "%u\n", dax_region->align); in region_align_show()
244 for (res = (dax_region)->res.child; res; res = res->sibling)
248 resource_size_t size = resource_size(&dax_region->res); in dax_region_avail_size()
251 device_lock_assert(dax_region->dev); in dax_region_avail_size()
254 size -= resource_size(res); in dax_region_avail_size()
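
dax_region_avail_size() computes free capacity by starting from the region's total size and subtracting every child resource already claimed, walking them via the for_each_dax_region_resource() helper above. A minimal userspace model, with a flat array standing in for the resource tree; extent and avail_size are illustrative names, not kernel API:

    /* Hedged model: region capacity minus each allocation already
     * carved out of it. Kernel resources use inclusive end addresses,
     * hence the +1 in extent_size(). */
    #include <stdint.h>
    #include <stdio.h>

    struct extent { uint64_t start, end; };         /* inclusive end */

    static uint64_t extent_size(const struct extent *e)
    {
            return e->end - e->start + 1;
    }

    static uint64_t avail_size(uint64_t region_size,
                               const struct extent *busy, int n)
    {
            uint64_t size = region_size;

            for (int i = 0; i < n; i++)
                    size -= extent_size(&busy[i]);
            return size;
    }

    int main(void)
    {
            struct extent busy[] = { { 0x0, 0xfff }, { 0x2000, 0x2fff } };

            printf("%#llx\n", (unsigned long long)
                   avail_size(0x10000, busy, 2));   /* 0xe000 */
            return 0;
    }
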
280 return -EINVAL; in seed_show()
283 seed = dax_region->seed; in seed_show()
299 return -EINVAL; in create_show()
302 youngest = dax_region->youngest; in create_show()
318 return -EINVAL; in create_store()
324 return -EINVAL; in create_store()
329 rc = -ENOSPC; in create_store()
334 .id = -1, in create_store()
348 if (!dax_region->seed) in create_store()
349 dax_region->seed = &dev_dax->dev; in create_store()
350 dax_region->youngest = &dev_dax->dev; in create_store()
362 struct dax_device *dax_dev = dev_dax->dax_dev; in kill_dev_dax()
366 unmap_mapping_range(inode->i_mapping, 0, 0, 1); in kill_dev_dax()
372 int i = dev_dax->nr_range - 1; in trim_dev_dax_range()
373 struct range *range = &dev_dax->ranges[i].range; in trim_dev_dax_range()
374 struct dax_region *dax_region = dev_dax->region; in trim_dev_dax_range()
376 device_lock_assert(dax_region->dev); in trim_dev_dax_range()
377 dev_dbg(&dev_dax->dev, "delete range[%d]: %#llx:%#llx\n", i, in trim_dev_dax_range()
378 (unsigned long long)range->start, in trim_dev_dax_range()
379 (unsigned long long)range->end); in trim_dev_dax_range()
381 __release_region(&dax_region->res, range->start, range_len(range)); in trim_dev_dax_range()
382 if (--dev_dax->nr_range == 0) { in trim_dev_dax_range()
383 kfree(dev_dax->ranges); in trim_dev_dax_range()
384 dev_dax->ranges = NULL; in trim_dev_dax_range()
390 while (dev_dax->nr_range) in free_dev_dax_ranges()
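
trim_dev_dax_range() always releases the highest-indexed range, and free_dev_dax_ranges() simply loops it until nr_range reaches zero, at which point the ranges array itself is freed so a zero-size device holds no allocation. A compact sketch of that tail-trim pattern, with illustrative types rather than the kernel's:

    #include <stdint.h>
    #include <stdlib.h>

    struct range { uint64_t start, end; };
    struct dev { struct range *ranges; int nr_range; };

    static void trim_last_range(struct dev *d)
    {
            /* caller guarantees nr_range > 0, as the kernel loop does */
            if (--d->nr_range == 0) {
                    free(d->ranges);
                    d->ranges = NULL;   /* no ranges == zero-size device */
            }
    }

    int main(void)
    {
            struct dev d = { malloc(2 * sizeof(struct range)), 2 };

            while (d.nr_range)
                    trim_last_range(&d); /* mirrors free_dev_dax_ranges() */
            return 0;
    }
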
416 kref_put(&dax_region->kref, dax_region_free); in dax_region_put()
423 struct device *dev = &dev_dax->dev; in __free_dev_dax_id()
425 int rc = dev_dax->id; in __free_dev_dax_id()
429 if (!dev_dax->dyn_id || dev_dax->id < 0) in __free_dev_dax_id()
430 return -1; in __free_dev_dax_id()
431 dax_region = dev_dax->region; in __free_dev_dax_id()
432 ida_free(&dax_region->ida, dev_dax->id); in __free_dev_dax_id()
434 dev_dax->id = -1; in __free_dev_dax_id()
440 struct device *dev = &dev_dax->dev; in free_dev_dax_id()
451 struct dax_region *dax_region = dev_dax->region; in alloc_dev_dax_id()
454 id = ida_alloc(&dax_region->ida, GFP_KERNEL); in alloc_dev_dax_id()
457 kref_get(&dax_region->kref); in alloc_dev_dax_id()
458 dev_dax->dyn_id = true; in alloc_dev_dax_id()
459 dev_dax->id = id; in alloc_dev_dax_id()
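
alloc_dev_dax_id() draws dynamic device ids from the region's IDA: ida_alloc() returns the smallest free non-negative id and ida_free() recycles it. A hedged userspace model using a flat bitmap in place of the kernel's radix-tree-backed IDA:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_IDS 64

    static bool used[MAX_IDS];

    static int id_alloc(void)
    {
            for (int i = 0; i < MAX_IDS; i++)
                    if (!used[i]) {
                            used[i] = true;
                            return i;
                    }
            return -1;                  /* -ENOSPC in spirit */
    }

    static void id_free(int id)
    {
            if (id >= 0 && id < MAX_IDS)
                    used[id] = false;
    }

    int main(void)
    {
            int a = id_alloc(), b = id_alloc();  /* 0, 1 */

            id_free(a);
            printf("%d\n", id_alloc());          /* 0 again: lowest free */
            (void)b;
            return 0;
    }
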
473 return -EINVAL; in delete_store()
475 victim = device_find_child_by_name(dax_region->dev, buf); in delete_store()
477 return -ENXIO; in delete_store()
482 if (victim->driver || dev_dax_size(dev_dax)) in delete_store()
483 rc = -EBUSY; in delete_store()
487 * again, but always preserve device-id-0 so that in delete_store()
491 if (dev_dax->id > 0) { in delete_store()
494 if (dax_region->seed == victim) in delete_store()
495 dax_region->seed = NULL; in delete_store()
496 if (dax_region->youngest == victim) in delete_store()
497 dax_region->youngest = NULL; in delete_store()
499 rc = -EBUSY; in delete_store()
525 return a->mode; in dax_region_visible()
554 sysfs_remove_groups(&dax_region->dev->kobj, in dax_region_unregister()
567 * parent->driver_data. This WARN is a reminder / safeguard for in alloc_dax_region()
568 * developers of device-dax drivers. in alloc_dax_region()
575 if (!IS_ALIGNED(range->start, align) in alloc_dax_region()
584 kref_init(&dax_region->kref); in alloc_dax_region()
585 dax_region->id = region_id; in alloc_dax_region()
586 dax_region->align = align; in alloc_dax_region()
587 dax_region->dev = parent; in alloc_dax_region()
588 dax_region->target_node = target_node; in alloc_dax_region()
589 ida_init(&dax_region->ida); in alloc_dax_region()
590 dax_region->res = (struct resource) { in alloc_dax_region()
591 .start = range->start, in alloc_dax_region()
592 .end = range->end, in alloc_dax_region()
596 if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups)) { in alloc_dax_region()
601 kref_get(&dax_region->kref); in alloc_dax_region()
611 struct device *parent = dev->parent; in dax_mapping_release()
614 ida_free(&dev_dax->ida, mapping->id); in dax_mapping_release()
623 struct dev_dax *dev_dax = to_dev_dax(dev->parent); in unregister_dax_mapping()
624 struct dax_region *dax_region = dev_dax->region; in unregister_dax_mapping()
628 device_lock_assert(dax_region->dev); in unregister_dax_mapping()
630 dev_dax->ranges[mapping->range_id].mapping = NULL; in unregister_dax_mapping()
631 mapping->range_id = -1; in unregister_dax_mapping()
640 struct dev_dax *dev_dax = to_dev_dax(dev->parent); in get_dax_range()
641 struct dax_region *dax_region = dev_dax->region; in get_dax_range()
643 device_lock(dax_region->dev); in get_dax_range()
644 if (mapping->range_id < 0) { in get_dax_range()
645 device_unlock(dax_region->dev); in get_dax_range()
649 return &dev_dax->ranges[mapping->range_id]; in get_dax_range()
654 struct dax_mapping *mapping = dax_range->mapping; in put_dax_range()
655 struct dev_dax *dev_dax = to_dev_dax(mapping->dev.parent); in put_dax_range()
656 struct dax_region *dax_region = dev_dax->region; in put_dax_range()
658 device_unlock(dax_region->dev); in put_dax_range()
669 return -ENXIO; in start_show()
670 rc = sprintf(buf, "%#llx\n", dax_range->range.start); in start_show()
685 return -ENXIO; in end_show()
686 rc = sprintf(buf, "%#llx\n", dax_range->range.end); in end_show()
701 return -ENXIO; in pgoff_show()
702 rc = sprintf(buf, "%#lx\n", dax_range->pgoff); in pgoff_show()
732 struct dax_region *dax_region = dev_dax->region; in devm_register_dax_mapping()
737 device_lock_assert(dax_region->dev); in devm_register_dax_mapping()
739 if (dev_WARN_ONCE(&dev_dax->dev, !dax_region->dev->driver, in devm_register_dax_mapping()
741 return -ENXIO; in devm_register_dax_mapping()
745 return -ENOMEM; in devm_register_dax_mapping()
746 mapping->range_id = range_id; in devm_register_dax_mapping()
747 mapping->id = ida_alloc(&dev_dax->ida, GFP_KERNEL); in devm_register_dax_mapping()
748 if (mapping->id < 0) { in devm_register_dax_mapping()
750 return -ENOMEM; in devm_register_dax_mapping()
752 dev_dax->ranges[range_id].mapping = mapping; in devm_register_dax_mapping()
753 dev = &mapping->dev; in devm_register_dax_mapping()
755 dev->parent = &dev_dax->dev; in devm_register_dax_mapping()
756 get_device(dev->parent); in devm_register_dax_mapping()
757 dev->type = &dax_mapping_type; in devm_register_dax_mapping()
758 dev_set_name(dev, "mapping%d", mapping->id); in devm_register_dax_mapping()
765 rc = devm_add_action_or_reset(dax_region->dev, unregister_dax_mapping, in devm_register_dax_mapping()
775 struct dax_region *dax_region = dev_dax->region; in alloc_dev_dax_range()
776 struct resource *res = &dax_region->res; in alloc_dev_dax_range()
777 struct device *dev = &dev_dax->dev; in alloc_dev_dax_range()
778 struct dev_dax_range *ranges; in alloc_dev_dax_range() local
780 struct resource *alloc; in alloc_dev_dax_range() local
783 device_lock_assert(dax_region->dev); in alloc_dev_dax_range()
785 /* handle the seed alloc special case */ in alloc_dev_dax_range()
787 if (dev_WARN_ONCE(dev, dev_dax->nr_range, in alloc_dev_dax_range()
788 "0-size allocation must be first\n")) in alloc_dev_dax_range()
789 return -EBUSY; in alloc_dev_dax_range()
790 /* nr_range == 0 is elsewhere special cased as 0-size device */ in alloc_dev_dax_range()
794 ranges = krealloc(dev_dax->ranges, sizeof(*ranges) in alloc_dev_dax_range()
795 * (dev_dax->nr_range + 1), GFP_KERNEL); in alloc_dev_dax_range()
796 if (!ranges) in alloc_dev_dax_range()
797 return -ENOMEM; in alloc_dev_dax_range()
799 alloc = __request_region(res, start, size, dev_name(dev), 0); in alloc_dev_dax_range()
800 if (!alloc) { in alloc_dev_dax_range()
802 * If this was an empty set of ranges nothing else in alloc_dev_dax_range()
803 * will release @ranges, so do it now. in alloc_dev_dax_range()
805 if (!dev_dax->nr_range) { in alloc_dev_dax_range()
806 kfree(ranges); in alloc_dev_dax_range()
807 ranges = NULL; in alloc_dev_dax_range()
809 dev_dax->ranges = ranges; in alloc_dev_dax_range()
810 return -ENOMEM; in alloc_dev_dax_range()
813 for (i = 0; i < dev_dax->nr_range; i++) in alloc_dev_dax_range()
814 pgoff += PHYS_PFN(range_len(&ranges[i].range)); in alloc_dev_dax_range()
815 dev_dax->ranges = ranges; in alloc_dev_dax_range()
816 ranges[dev_dax->nr_range++] = (struct dev_dax_range) { in alloc_dev_dax_range()
819 .start = alloc->start, in alloc_dev_dax_range()
820 .end = alloc->end, in alloc_dev_dax_range()
824 dev_dbg(dev, "alloc range[%d]: %pa:%pa\n", dev_dax->nr_range - 1, in alloc_dev_dax_range()
825 &alloc->start, &alloc->end); in alloc_dev_dax_range()
831 if (!device_is_registered(&dev_dax->dev)) in alloc_dev_dax_range()
834 rc = devm_register_dax_mapping(dev_dax, dev_dax->nr_range - 1); in alloc_dev_dax_range()
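
alloc_dev_dax_range() grows the ranges array with krealloc() before attempting the reservation; if __request_region() then fails on a device that previously had no ranges, the function must free the array itself because nothing else will, and on success the new range's pgoff is the page count of all earlier ranges. A minimal sketch of that flow, where reserve_extent() is a hypothetical stand-in for __request_region():

    #include <stdint.h>
    #include <stdlib.h>

    #define PAGE_SHIFT 12

    struct range { uint64_t start, end; };
    struct dev_range { unsigned long pgoff; struct range range; };
    struct dev { struct dev_range *ranges; int nr_range; };

    /* stand-in for __request_region(); always succeeds in this sketch */
    static int reserve_extent(uint64_t start, uint64_t size)
    {
            (void)start; (void)size;
            return 0;
    }

    static int add_range(struct dev *d, uint64_t start, uint64_t size)
    {
            unsigned long pgoff = 0;
            struct dev_range *ranges;

            /* grow the array first, so failure leaves the device intact */
            ranges = realloc(d->ranges, sizeof(*ranges) * (d->nr_range + 1));
            if (!ranges)
                    return -1;

            if (reserve_extent(start, size) < 0) {
                    /* an empty device has no other owner for @ranges */
                    if (!d->nr_range) {
                            free(ranges);
                            ranges = NULL;
                    }
                    d->ranges = ranges;
                    return -1;
            }

            /* pgoff of the new range = pages in all earlier ranges */
            for (int i = 0; i < d->nr_range; i++)
                    pgoff += (ranges[i].range.end - ranges[i].range.start + 1)
                            >> PAGE_SHIFT;

            d->ranges = ranges;
            d->ranges[d->nr_range++] = (struct dev_range) {
                    .pgoff = pgoff,
                    .range = { .start = start, .end = start + size - 1 },
            };
            return 0;
    }

    int main(void)
    {
            struct dev d = { 0 };

            return add_range(&d, 0x100000000ULL, 1UL << 30);
    }
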
843 int last_range = dev_dax->nr_range - 1; in adjust_dev_dax_range()
844 struct dev_dax_range *dax_range = &dev_dax->ranges[last_range]; in adjust_dev_dax_range()
845 struct dax_region *dax_region = dev_dax->region; in adjust_dev_dax_range()
847 struct range *range = &dax_range->range; in adjust_dev_dax_range()
848 struct device *dev = &dev_dax->dev; in adjust_dev_dax_range()
851 device_lock_assert(dax_region->dev); in adjust_dev_dax_range()
854 return -EINVAL; in adjust_dev_dax_range()
856 rc = adjust_resource(res, range->start, size); in adjust_dev_dax_range()
861 .start = range->start, in adjust_dev_dax_range()
862 .end = range->start + size - 1, in adjust_dev_dax_range()
866 last_range, (unsigned long long) range->start, in adjust_dev_dax_range()
867 (unsigned long long) range->end); in adjust_dev_dax_range()
891 return IS_ALIGNED(size, max_t(unsigned long, dev_dax->align, memremap_compat_align())); in alloc_is_aligned()
896 resource_size_t to_shrink = dev_dax_size(dev_dax) - size; in dev_dax_shrink()
897 struct dax_region *dax_region = dev_dax->region; in dev_dax_shrink()
898 struct device *dev = &dev_dax->dev; in dev_dax_shrink()
901 for (i = dev_dax->nr_range - 1; i >= 0; i--) { in dev_dax_shrink()
902 struct range *range = &dev_dax->ranges[i].range; in dev_dax_shrink()
903 struct dax_mapping *mapping = dev_dax->ranges[i].mapping; in dev_dax_shrink()
909 devm_release_action(dax_region->dev, in dev_dax_shrink()
910 unregister_dax_mapping, &mapping->dev); in dev_dax_shrink()
912 to_shrink -= shrink; in dev_dax_shrink()
919 if (strcmp(res->name, dev_name(dev)) == 0 in dev_dax_shrink()
920 && res->start == range->start) { in dev_dax_shrink()
925 if (dev_WARN_ONCE(dev, !adjust || i != dev_dax->nr_range - 1, in dev_dax_shrink()
927 return -ENXIO; in dev_dax_shrink()
929 - shrink); in dev_dax_shrink()
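
dev_dax_shrink() walks the ranges from the tail: whole ranges that fit within the amount to shrink are unregistered and trimmed outright, and only the (new) last range may be partially adjusted downward. A self-contained sketch of that order of operations, with illustrative types and the mapping teardown elided:

    #include <stdint.h>
    #include <stdlib.h>

    struct range { uint64_t start, end; };
    struct dev { struct range *ranges; int nr_range; };

    static uint64_t range_len(const struct range *r)
    {
            return r->end - r->start + 1;
    }

    static int shrink_dev(struct dev *d, uint64_t to_shrink)
    {
            for (int i = d->nr_range - 1; i >= 0 && to_shrink; i--) {
                    uint64_t len = range_len(&d->ranges[i]);

                    if (len <= to_shrink) {
                            /* whole range fits: drop it entirely */
                            to_shrink -= len;
                            if (--d->nr_range == 0) {
                                    free(d->ranges);
                                    d->ranges = NULL;
                            }
                            continue;
                    }
                    /* partial shrink: only legal on the last range */
                    d->ranges[i].end -= to_shrink;
                    to_shrink = 0;
            }
            return to_shrink ? -1 : 0;
    }

    int main(void)
    {
            struct dev d = { calloc(2, sizeof(struct range)), 2 };

            if (!d.ranges)
                    return 1;
            d.ranges[0] = (struct range){ 0x0000, 0x0fff };
            d.ranges[1] = (struct range){ 0x2000, 0x2fff };
            return shrink_dev(&d, 0x1800); /* drops range 1, trims range 0 */
    }
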
936 * allocations. I.e. the dev_dax->ranges array is ordered by increasing pgoff.
943 if (dev_dax->nr_range == 0) in adjust_ok()
945 if (strcmp(res->name, dev_name(&dev_dax->dev)) != 0) in adjust_ok()
947 last = &dev_dax->ranges[dev_dax->nr_range - 1]; in adjust_ok()
948 if (last->range.start != res->start || last->range.end != res->end) in adjust_ok()
950 for (i = 0; i < dev_dax->nr_range - 1; i++) { in adjust_ok()
951 struct dev_dax_range *dax_range = &dev_dax->ranges[i]; in adjust_ok()
953 if (dax_range->pgoff > last->pgoff) in adjust_ok()
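
adjust_ok() gates in-place expansion: adjust_resource() is only used when the abutting resource backs the device's last allocation, i.e. no other range carries a higher pgoff; otherwise a fresh range is allocated so dev_dax->ranges stays ordered by increasing pgoff. A minimal model of that invariant check, with an illustrative struct:

    #include <stdbool.h>

    struct dev_range { unsigned long pgoff; };

    /* caller ensures nr_range >= 1, as the kernel code does */
    static bool adjust_ok(const struct dev_range *ranges, int nr_range)
    {
            unsigned long last = ranges[nr_range - 1].pgoff;

            for (int i = 0; i < nr_range - 1; i++)
                    if (ranges[i].pgoff > last)
                            return false;  /* must allocate a new range */
            return true;
    }

    int main(void)
    {
            struct dev_range ranges[] = { { 0 }, { 1 }, { 2 } };

            return adjust_ok(ranges, 3) ? 0 : 1; /* last has max pgoff */
    }
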
965 struct resource *region_res = &dax_region->res; in dev_dax_resize()
966 struct device *dev = &dev_dax->dev; in dev_dax_resize()
968 resource_size_t alloc = 0; in dev_dax_resize() local
971 if (dev->driver) in dev_dax_resize()
972 return -EBUSY; in dev_dax_resize()
975 if (size > dev_size && size - dev_size > avail) in dev_dax_resize()
976 return -ENOSPC; in dev_dax_resize()
980 to_alloc = size - dev_size; in dev_dax_resize()
983 return -ENXIO; in dev_dax_resize()
991 first = region_res->child; in dev_dax_resize()
993 return alloc_dev_dax_range(dev_dax, dax_region->res.start, to_alloc); in dev_dax_resize()
995 rc = -ENOSPC; in dev_dax_resize()
996 for (res = first; res; res = res->sibling) { in dev_dax_resize()
997 struct resource *next = res->sibling; in dev_dax_resize()
1000 if (res == first && res->start > dax_region->res.start) { in dev_dax_resize()
1001 alloc = min(res->start - dax_region->res.start, to_alloc); in dev_dax_resize()
1002 rc = alloc_dev_dax_range(dev_dax, dax_region->res.start, alloc); in dev_dax_resize()
1006 alloc = 0; in dev_dax_resize()
1008 if (next && next->start > res->end + 1) in dev_dax_resize()
1009 alloc = min(next->start - (res->end + 1), to_alloc); in dev_dax_resize()
1012 if (!alloc && !next && res->end < region_res->end) in dev_dax_resize()
1013 alloc = min(region_res->end - res->end, to_alloc); in dev_dax_resize()
1015 if (!alloc) in dev_dax_resize()
1019 rc = adjust_dev_dax_range(dev_dax, res, resource_size(res) + alloc); in dev_dax_resize()
1022 rc = alloc_dev_dax_range(dev_dax, res->end + 1, alloc); in dev_dax_resize()
1027 to_alloc -= alloc; in dev_dax_resize()
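
dev_dax_resize() performs a first-fit scan over the region's child resources: free space before the first child, gaps between siblings, and tail space after the last child are all candidates, with min(gap, to_alloc) carved out on each pass until the request is satisfied. A runnable userspace model of the gap search; find_gap() and the sorted extent array are illustrative, not kernel API:

    #include <stdint.h>
    #include <stdio.h>

    struct extent { uint64_t start, end; };         /* inclusive end */

    /* find the first free gap in region [rs, re]; busy[] is sorted by
     * start. Writes the gap's start and length, returns -1 if full. */
    static int find_gap(uint64_t rs, uint64_t re,
                        const struct extent *busy, int n,
                        uint64_t *gap_start, uint64_t *len)
    {
            uint64_t cursor = rs;

            for (int i = 0; i < n; i++) {
                    if (busy[i].start > cursor) {
                            *gap_start = cursor;
                            *len = busy[i].start - cursor;
                            return 0;
                    }
                    cursor = busy[i].end + 1;
            }
            if (cursor <= re) {                     /* tail space */
                    *gap_start = cursor;
                    *len = re - cursor + 1;
                    return 0;
            }
            return -1;                              /* -ENOSPC in spirit */
    }

    int main(void)
    {
            struct extent busy[] = { { 0x0, 0xfff }, { 0x3000, 0x3fff } };
            uint64_t start, len;

            if (!find_gap(0x0, 0xffff, busy, 2, &start, &len))
                    printf("gap at %#llx, %#llx bytes\n",
                           (unsigned long long)start,
                           (unsigned long long)len); /* 0x1000, 0x2000 */
            return 0;
    }
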
1039 struct dax_region *dax_region = dev_dax->region; in size_store()
1047 return -EINVAL; in size_store()
1050 device_lock(dax_region->dev); in size_store()
1051 if (!dax_region->dev->driver) { in size_store()
1052 device_unlock(dax_region->dev); in size_store()
1053 return -ENXIO; in size_store()
1058 device_unlock(dax_region->dev); in size_store()
1068 ssize_t rc = -EINVAL; in range_parse()
1075 start = strsep(&end, "-"); in range_parse()
1082 range->start = addr; in range_parse()
1087 range->end = addr; in range_parse()
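
range_parse() backs the sysfs 'mapping' attribute: the input is split on '-' with strsep() and each half is parsed as a hex address. A userspace approximation using strchr() and strtoull() in place of the kernel helpers:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct range { uint64_t start, end; };

    static int parse_range(const char *buf, struct range *r)
    {
            char tmp[64], *dash;

            if (strlen(buf) >= sizeof(tmp))
                    return -EINVAL;
            strcpy(tmp, buf);
            dash = strchr(tmp, '-');
            if (!dash)
                    return -EINVAL;
            *dash = '\0';                   /* split "start-end" */
            r->start = strtoull(tmp, NULL, 16);
            r->end = strtoull(dash + 1, NULL, 16);
            if (r->end < r->start)
                    return -EINVAL;
            return 0;
    }

    int main(void)
    {
            struct range r;

            if (!parse_range("100000000-13fffffff", &r))
                    printf("%#llx-%#llx\n",
                           (unsigned long long)r.start,
                           (unsigned long long)r.end);
            return 0;
    }
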
1098 struct dax_region *dax_region = dev_dax->region; in mapping_store()
1107 rc = -ENXIO; in mapping_store()
1108 device_lock(dax_region->dev); in mapping_store()
1109 if (!dax_region->dev->driver) { in mapping_store()
1110 device_unlock(dax_region->dev); in mapping_store()
1119 device_unlock(dax_region->dev); in mapping_store()
1130 return sprintf(buf, "%d\n", dev_dax->align); in align_show()
1136 struct device *dev = &dev_dax->dev; in dev_dax_validate_align()
1141 __func__, dev_dax->align, &dev_size); in dev_dax_validate_align()
1142 return -EINVAL; in dev_dax_validate_align()
1145 for (i = 0; i < dev_dax->nr_range; i++) { in dev_dax_validate_align()
1146 size_t len = range_len(&dev_dax->ranges[i].range); in dev_dax_validate_align()
1150 __func__, dev_dax->align, i); in dev_dax_validate_align()
1151 return -EINVAL; in dev_dax_validate_align()
1162 struct dax_region *dax_region = dev_dax->region; in align_store()
1168 return -ENXIO; in align_store()
1171 return -EINVAL; in align_store()
1173 device_lock(dax_region->dev); in align_store()
1174 if (!dax_region->dev->driver) { in align_store()
1175 device_unlock(dax_region->dev); in align_store()
1176 return -ENXIO; in align_store()
1180 if (dev->driver) { in align_store()
1181 rc = -EBUSY; in align_store()
1185 align_save = dev_dax->align; in align_store()
1186 dev_dax->align = val; in align_store()
1189 dev_dax->align = align_save; in align_store()
1192 device_unlock(dax_region->dev); in align_store()
1199 struct dax_region *dax_region = dev_dax->region; in dev_dax_target_node()
1201 return dax_region->target_node; in dev_dax_target_node()
1217 struct dax_region *dax_region = dev_dax->region; in resource_show()
1220 if (dev_dax->nr_range < 1) in resource_show()
1221 start = dax_region->res.start; in resource_show()
1223 start = dev_dax->ranges[0].range.start; in resource_show()
1233 * We only ever expect to handle device-dax instances, i.e. the in modalias_show()
1251 struct dax_region *dax_region = dev_dax->region; in dev_dax_visible()
1262 return a->mode; in dev_dax_visible()
1289 struct dax_device *dax_dev = dev_dax->dax_dev; in dev_dax_release()
1293 kfree(dev_dax->pgmap); in dev_dax_release()
1304 struct dax_region *dax_region = data->dax_region; in devm_create_dev_dax()
1305 struct device *parent = dax_region->dev; in devm_create_dev_dax()
1314 return ERR_PTR(-ENOMEM); in devm_create_dev_dax()
1316 dev_dax->region = dax_region; in devm_create_dev_dax()
1318 if (dev_WARN_ONCE(parent, data->id < 0, in devm_create_dev_dax()
1320 rc = -EINVAL; in devm_create_dev_dax()
1324 dev_dax->id = data->id; in devm_create_dev_dax()
1326 if (dev_WARN_ONCE(parent, data->id >= 0, in devm_create_dev_dax()
1328 rc = -EINVAL; in devm_create_dev_dax()
1337 dev = &dev_dax->dev; in devm_create_dev_dax()
1339 dev_set_name(dev, "dax%d.%d", dax_region->id, dev_dax->id); in devm_create_dev_dax()
1341 rc = alloc_dev_dax_range(dev_dax, dax_region->res.start, data->size); in devm_create_dev_dax()
1345 if (data->pgmap) { in devm_create_dev_dax()
1349 dev_dax->pgmap = kmemdup(data->pgmap, in devm_create_dev_dax()
1351 if (!dev_dax->pgmap) { in devm_create_dev_dax()
1352 rc = -ENOMEM; in devm_create_dev_dax()
1370 dev_dax->dax_dev = dax_dev; in devm_create_dev_dax()
1371 dev_dax->target_node = dax_region->target_node; in devm_create_dev_dax()
1372 dev_dax->align = dax_region->align; in devm_create_dev_dax()
1373 ida_init(&dev_dax->ida); in devm_create_dev_dax()
1376 dev->devt = inode->i_rdev; in devm_create_dev_dax()
1377 if (data->subsys == DEV_DAX_BUS) in devm_create_dev_dax()
1378 dev->bus = &dax_bus_type; in devm_create_dev_dax()
1380 dev->class = dax_class; in devm_create_dev_dax()
1381 dev->parent = parent; in devm_create_dev_dax()
1382 dev->type = &dev_dax_type; in devm_create_dev_dax()
1391 rc = devm_add_action_or_reset(dax_region->dev, unregister_dev_dax, dev); in devm_create_dev_dax()
1396 if (dev_dax->nr_range && range_len(&dev_dax->ranges[0].range)) { in devm_create_dev_dax()
1405 kfree(dev_dax->pgmap); in devm_create_dev_dax()
1422 struct device_driver *drv = &dax_drv->drv; in __dax_driver_register()
1425 INIT_LIST_HEAD(&dax_drv->ids); in __dax_driver_register()
1426 drv->owner = module; in __dax_driver_register()
1427 drv->name = mod_name; in __dax_driver_register()
1428 drv->mod_name = mod_name; in __dax_driver_register()
1429 drv->bus = &dax_bus_type; in __dax_driver_register()
1433 match_always_count += dax_drv->match_always; in __dax_driver_register()
1435 match_always_count--; in __dax_driver_register()
1437 rc = -EINVAL; in __dax_driver_register()
1448 struct device_driver *drv = &dax_drv->drv; in dax_driver_unregister()
1452 match_always_count -= dax_drv->match_always; in dax_driver_unregister()
1453 list_for_each_entry_safe(dax_id, _id, &dax_drv->ids, list) { in dax_driver_unregister()
1454 list_del(&dax_id->list); in dax_driver_unregister()