Lines matching "alloc-ranges" (drivers/dax/bus.c)

1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2017-2018 Intel Corporation. All rights reserved. */
10 #include "dax-private.h"
24 * We only ever expect to handle device-dax instances, i.e. the in dax_bus_uevent()
42 list_for_each_entry(dax_id, &dax_drv->ids, list) in __dax_match_id()
43 if (sysfs_streq(dax_id->dev_name, dev_name)) in __dax_match_id()
64 if (dev_dax->region->res.flags & IORESOURCE_DAX_KMEM) in dax_match_type()
67 if (dax_drv->type == type) in dax_match_type()
71 if (dax_drv->type == DAXDRV_DEVICE_TYPE && in dax_match_type()
95 return -EINVAL; in do_id_store()
98 return -EINVAL; in do_id_store()
106 strncpy(dax_id->dev_name, buf, DAX_NAME_LEN); in do_id_store()
107 list_add(&dax_id->list, &dax_drv->ids); in do_id_store()
109 rc = -ENOMEM; in do_id_store()
112 list_del(&dax_id->list); in do_id_store()
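do_id_store() maintains the per-driver list of dynamically bound device names: entries are matched with sysfs_streq() (line 43), linked on a successful store, and unlinked on removal. A minimal sketch of that bookkeeping, with hypothetical helper names id_add()/id_del(), the real function's input validation and locking elided, and strscpy() standing in for the strncpy() at line 106:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/string.h>

struct dax_id {
	struct list_head list;
	char dev_name[DAX_NAME_LEN];	/* DAX_NAME_LEN from dax-private.h */
};

static int id_add(struct dax_device_driver *dax_drv, const char *buf)
{
	struct dax_id *dax_id = kzalloc(sizeof(*dax_id), GFP_KERNEL);

	if (!dax_id)
		return -ENOMEM;	/* the rc = -ENOMEM path at line 109 */
	strscpy(dax_id->dev_name, buf, sizeof(dax_id->dev_name));
	list_add(&dax_id->list, &dax_drv->ids);
	return 0;
}

static void id_del(struct dax_id *dax_id)
{
	list_del(&dax_id->list);
	kfree(dax_id);
}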
158 * /dev/daxN.M device composed by one or more potentially discontiguous ranges.
161 * (IORESOURCE_DAX_STATIC). On static dax devices, the @pgmap is pre-assigned
163 * devices it is NULL but afterwards allocated by dax core on device ->probe().
169 return (dax_region->res.flags & IORESOURCE_DAX_STATIC) != 0; in is_static()
174 return is_static(dev_dax->region); in static_dev_dax()
183 device_lock_assert(&dev_dax->dev); in dev_dax_size()
185 for (i = 0; i < dev_dax->nr_range; i++) in dev_dax_size()
186 size += range_len(&dev_dax->ranges[i].range); in dev_dax_size()
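Pieced together from the matched lines, dev_dax_size() is a device-lock-protected sum over the ranges[] array; range_len() (include/linux/range.h) is end - start + 1, so a device with no ranges reports size 0:

static resource_size_t dev_dax_size(struct dev_dax *dev_dax)
{
	resource_size_t size = 0;
	int i;

	device_lock_assert(&dev_dax->dev);	/* ranges[] changes under device_lock */
	for (i = 0; i < dev_dax->nr_range; i++)
		size += range_len(&dev_dax->ranges[i].range);
	return size;
}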
193 struct dax_device_driver *dax_drv = to_dax_drv(dev->driver); in dax_bus_probe()
195 struct dax_region *dax_region = dev_dax->region; in dax_bus_probe()
198 if (dev_dax_size(dev_dax) == 0 || dev_dax->id < 0) in dax_bus_probe()
199 return -ENXIO; in dax_bus_probe()
201 rc = dax_drv->probe(dev_dax); in dax_bus_probe()
210 if (dax_region->seed == dev) in dax_bus_probe()
211 dax_region->seed = NULL; in dax_bus_probe()
218 struct dax_device_driver *dax_drv = to_dax_drv(dev->driver); in dax_bus_remove()
221 if (dax_drv->remove) in dax_bus_remove()
222 dax_drv->remove(dev_dax); in dax_bus_remove()
253 return sprintf(buf, "%d\n", dax_region->id); in id_show()
263 resource_size(&dax_region->res)); in region_size_show()
273 return sprintf(buf, "%u\n", dax_region->align); in region_align_show()
279 for (res = (dax_region)->res.child; res; res = res->sibling)
283 resource_size_t size = resource_size(&dax_region->res); in dax_region_avail_size()
286 device_lock_assert(dax_region->dev); in dax_region_avail_size()
289 size -= resource_size(res); in dax_region_avail_size()
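The loop body at line 279 is the for_each_dax_region_resource() iterator over the region resource's children. Reassembled, dax_region_avail_size() starts from the full region size and subtracts every child allocation, so "available" counts unclaimed gaps anywhere in the region, not one contiguous run:

static resource_size_t dax_region_avail_size(struct dax_region *dax_region)
{
	resource_size_t size = resource_size(&dax_region->res);
	struct resource *res;

	device_lock_assert(dax_region->dev);
	for_each_dax_region_resource(dax_region, res)
		size -= resource_size(res);
	return size;
}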
315 return -EINVAL; in seed_show()
318 seed = dax_region->seed; in seed_show()
334 return -EINVAL; in create_show()
337 youngest = dax_region->youngest; in create_show()
353 return -EINVAL; in create_store()
359 return -EINVAL; in create_store()
364 rc = -ENOSPC; in create_store()
369 .id = -1, in create_store()
383 if (!dax_region->seed) in create_store()
384 dax_region->seed = &dev_dax->dev; in create_store()
385 dax_region->youngest = &dev_dax->dev; in create_store()
397 struct dax_device *dax_dev = dev_dax->dax_dev; in kill_dev_dax()
401 unmap_mapping_range(inode->i_mapping, 0, 0, 1); in kill_dev_dax()
406 * ranges on probe() from previous reconfigurations of region devices. in kill_dev_dax()
409 dev_dax->pgmap = NULL; in kill_dev_dax()
415 int i = dev_dax->nr_range - 1; in trim_dev_dax_range()
416 struct range *range = &dev_dax->ranges[i].range; in trim_dev_dax_range()
417 struct dax_region *dax_region = dev_dax->region; in trim_dev_dax_range()
419 device_lock_assert(dax_region->dev); in trim_dev_dax_range()
420 dev_dbg(&dev_dax->dev, "delete range[%d]: %#llx:%#llx\n", i, in trim_dev_dax_range()
421 (unsigned long long)range->start, in trim_dev_dax_range()
422 (unsigned long long)range->end); in trim_dev_dax_range()
424 __release_region(&dax_region->res, range->start, range_len(range)); in trim_dev_dax_range()
425 if (--dev_dax->nr_range == 0) { in trim_dev_dax_range()
426 kfree(dev_dax->ranges); in trim_dev_dax_range()
427 dev_dax->ranges = NULL; in trim_dev_dax_range()
433 while (dev_dax->nr_range) in free_dev_dax_ranges()
459 kref_put(&dax_region->kref, dax_region_free); in dax_region_put()
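Region lifetime is a plain kref: kref_init() at alloc_dax_region() (line 626), kref_get() whenever a dynamic device id pins the region (line 499), and kref_put() here with dax_region_free as the release callback. The shape of that pattern, with all fields other than the kref elided:

#include <linux/kref.h>
#include <linux/slab.h>

struct dax_region {
	struct kref kref;
	/* other fields elided */
};

static void dax_region_free(struct kref *kref)
{
	struct dax_region *dax_region =
		container_of(kref, struct dax_region, kref);

	kfree(dax_region);
}

static void dax_region_put(struct dax_region *dax_region)
{
	kref_put(&dax_region->kref, dax_region_free);
}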
465 struct device *dev = &dev_dax->dev; in __free_dev_dax_id()
467 int rc = dev_dax->id; in __free_dev_dax_id()
471 if (!dev_dax->dyn_id || dev_dax->id < 0) in __free_dev_dax_id()
472 return -1; in __free_dev_dax_id()
473 dax_region = dev_dax->region; in __free_dev_dax_id()
474 ida_free(&dax_region->ida, dev_dax->id); in __free_dev_dax_id()
476 dev_dax->id = -1; in __free_dev_dax_id()
482 struct device *dev = &dev_dax->dev; in free_dev_dax_id()
493 struct dax_region *dax_region = dev_dax->region; in alloc_dev_dax_id()
496 id = ida_alloc(&dax_region->ida, GFP_KERNEL); in alloc_dev_dax_id()
499 kref_get(&dax_region->kref); in alloc_dev_dax_id()
500 dev_dax->dyn_id = true; in alloc_dev_dax_id()
501 dev_dax->id = id; in alloc_dev_dax_id()
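Reassembled from lines 493-501 (only the id < 0 early return is filled in here): dynamic device ids come from the region's IDA, and each live id takes a reference on the region, which __free_dev_dax_id() (lines 465-476) releases again via ida_free():

static int alloc_dev_dax_id(struct dev_dax *dev_dax)
{
	struct dax_region *dax_region = dev_dax->region;
	int id;

	id = ida_alloc(&dax_region->ida, GFP_KERNEL);
	if (id < 0)
		return id;
	kref_get(&dax_region->kref);	/* a live id pins the region */
	dev_dax->dyn_id = true;
	dev_dax->id = id;
	return id;
}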
515 return -EINVAL; in delete_store()
517 victim = device_find_child_by_name(dax_region->dev, buf); in delete_store()
519 return -ENXIO; in delete_store()
524 if (victim->driver || dev_dax_size(dev_dax)) in delete_store()
525 rc = -EBUSY; in delete_store()
529 * again, but always preserve device-id-0 so that in delete_store()
533 if (dev_dax->id > 0) { in delete_store()
536 if (dax_region->seed == victim) in delete_store()
537 dax_region->seed = NULL; in delete_store()
538 if (dax_region->youngest == victim) in delete_store()
539 dax_region->youngest = NULL; in delete_store()
541 rc = -EBUSY; in delete_store()
567 return a->mode; in dax_region_visible()
596 sysfs_remove_groups(&dax_region->dev->kobj, in dax_region_unregister()
609 * parent->driver_data. This WARN is a reminder / safeguard for in alloc_dax_region()
610 * developers of device-dax drivers. in alloc_dax_region()
617 if (!IS_ALIGNED(range->start, align) in alloc_dax_region()
626 kref_init(&dax_region->kref); in alloc_dax_region()
627 dax_region->id = region_id; in alloc_dax_region()
628 dax_region->align = align; in alloc_dax_region()
629 dax_region->dev = parent; in alloc_dax_region()
630 dax_region->target_node = target_node; in alloc_dax_region()
631 ida_init(&dax_region->ida); in alloc_dax_region()
632 dax_region->res = (struct resource) { in alloc_dax_region()
633 .start = range->start, in alloc_dax_region()
634 .end = range->end, in alloc_dax_region()
638 if (sysfs_create_groups(&parent->kobj, dax_region_attribute_groups)) { in alloc_dax_region()
652 struct device *parent = dev->parent; in dax_mapping_release()
655 ida_free(&dev_dax->ida, mapping->id); in dax_mapping_release()
664 struct dev_dax *dev_dax = to_dev_dax(dev->parent); in unregister_dax_mapping()
665 struct dax_region *dax_region = dev_dax->region; in unregister_dax_mapping()
669 device_lock_assert(dax_region->dev); in unregister_dax_mapping()
671 dev_dax->ranges[mapping->range_id].mapping = NULL; in unregister_dax_mapping()
672 mapping->range_id = -1; in unregister_dax_mapping()
680 struct dev_dax *dev_dax = to_dev_dax(dev->parent); in get_dax_range()
681 struct dax_region *dax_region = dev_dax->region; in get_dax_range()
683 device_lock(dax_region->dev); in get_dax_range()
684 if (mapping->range_id < 0) { in get_dax_range()
685 device_unlock(dax_region->dev); in get_dax_range()
689 return &dev_dax->ranges[mapping->range_id]; in get_dax_range()
694 struct dax_mapping *mapping = dax_range->mapping; in put_dax_range()
695 struct dev_dax *dev_dax = to_dev_dax(mapping->dev.parent); in put_dax_range()
696 struct dax_region *dax_region = dev_dax->region; in put_dax_range()
698 device_unlock(dax_region->dev); in put_dax_range()
709 return -ENXIO; in start_show()
710 rc = sprintf(buf, "%#llx\n", dax_range->range.start); in start_show()
725 return -ENXIO; in end_show()
726 rc = sprintf(buf, "%#llx\n", dax_range->range.end); in end_show()
741 return -ENXIO; in pgoff_show()
742 rc = sprintf(buf, "%#lx\n", dax_range->pgoff); in pgoff_show()
772 struct dax_region *dax_region = dev_dax->region; in devm_register_dax_mapping()
777 device_lock_assert(dax_region->dev); in devm_register_dax_mapping()
779 if (dev_WARN_ONCE(&dev_dax->dev, !dax_region->dev->driver, in devm_register_dax_mapping()
781 return -ENXIO; in devm_register_dax_mapping()
785 return -ENOMEM; in devm_register_dax_mapping()
786 mapping->range_id = range_id; in devm_register_dax_mapping()
787 mapping->id = ida_alloc(&dev_dax->ida, GFP_KERNEL); in devm_register_dax_mapping()
788 if (mapping->id < 0) { in devm_register_dax_mapping()
790 return -ENOMEM; in devm_register_dax_mapping()
792 dev_dax->ranges[range_id].mapping = mapping; in devm_register_dax_mapping()
793 dev = &mapping->dev; in devm_register_dax_mapping()
795 dev->parent = &dev_dax->dev; in devm_register_dax_mapping()
796 get_device(dev->parent); in devm_register_dax_mapping()
797 dev->type = &dax_mapping_type; in devm_register_dax_mapping()
798 dev_set_name(dev, "mapping%d", mapping->id); in devm_register_dax_mapping()
805 rc = devm_add_action_or_reset(dax_region->dev, unregister_dax_mapping, in devm_register_dax_mapping()
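devm_register_dax_mapping() brings up each mappingN child with the stock driver-core sequence. The matched lines show the parent/type/name setup; the device_initialize()/device_add() calls and the put_device() error path below are the standard pattern assumed to sit between them:

dev = &mapping->dev;
device_initialize(dev);
dev->parent = &dev_dax->dev;
get_device(dev->parent);
dev->type = &dax_mapping_type;
dev_set_name(dev, "mapping%d", mapping->id);
rc = device_add(dev);
if (rc) {
	put_device(dev);	/* final put invokes dax_mapping_release() */
	return rc;
}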
815 struct dax_region *dax_region = dev_dax->region; in alloc_dev_dax_range()
816 struct resource *res = &dax_region->res; in alloc_dev_dax_range()
817 struct device *dev = &dev_dax->dev; in alloc_dev_dax_range()
818 struct dev_dax_range *ranges; in alloc_dev_dax_range() local
820 struct resource *alloc; in alloc_dev_dax_range() local
823 device_lock_assert(dax_region->dev); in alloc_dev_dax_range()
825 /* handle the seed alloc special case */ in alloc_dev_dax_range()
827 if (dev_WARN_ONCE(dev, dev_dax->nr_range, in alloc_dev_dax_range()
828 "0-size allocation must be first\n")) in alloc_dev_dax_range()
829 return -EBUSY; in alloc_dev_dax_range()
830 /* nr_range == 0 is elsewhere special cased as 0-size device */ in alloc_dev_dax_range()
834 alloc = __request_region(res, start, size, dev_name(dev), 0); in alloc_dev_dax_range()
835 if (!alloc) in alloc_dev_dax_range()
836 return -ENOMEM; in alloc_dev_dax_range()
838 ranges = krealloc(dev_dax->ranges, sizeof(*ranges) in alloc_dev_dax_range()
839 * (dev_dax->nr_range + 1), GFP_KERNEL); in alloc_dev_dax_range()
840 if (!ranges) { in alloc_dev_dax_range()
841 __release_region(res, alloc->start, resource_size(alloc)); in alloc_dev_dax_range()
842 return -ENOMEM; in alloc_dev_dax_range()
845 for (i = 0; i < dev_dax->nr_range; i++) in alloc_dev_dax_range()
846 pgoff += PHYS_PFN(range_len(&ranges[i].range)); in alloc_dev_dax_range()
847 dev_dax->ranges = ranges; in alloc_dev_dax_range()
848 ranges[dev_dax->nr_range++] = (struct dev_dax_range) { in alloc_dev_dax_range()
851 .start = alloc->start, in alloc_dev_dax_range()
852 .end = alloc->end, in alloc_dev_dax_range()
856 dev_dbg(dev, "alloc range[%d]: %pa:%pa\n", dev_dax->nr_range - 1, in alloc_dev_dax_range()
857 &alloc->start, &alloc->end); in alloc_dev_dax_range()
863 if (!device_is_registered(&dev_dax->dev)) in alloc_dev_dax_range()
866 rc = devm_register_dax_mapping(dev_dax, dev_dax->nr_range - 1); in alloc_dev_dax_range()
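alloc_dev_dax_range() claims the physical span from the region resource first and only then grows the ranges[] array; if krealloc() fails, the fresh reservation is released again so the resource tree and the array can never disagree. The same idiom as a generic, self-contained helper (claim_and_track() is a hypothetical name, not part of the driver):

#include <linux/ioport.h>
#include <linux/slab.h>

/* Reserve from a parent resource, then grow a tracking array;
 * roll the reservation back if the array cannot grow.
 */
static int claim_and_track(struct resource *parent, resource_size_t start,
			   resource_size_t size, const char *name,
			   struct resource ***vec, int *nr)
{
	struct resource **grown, *alloc;

	alloc = __request_region(parent, start, size, name, 0);
	if (!alloc)
		return -ENOMEM;
	grown = krealloc(*vec, sizeof(*grown) * (*nr + 1), GFP_KERNEL);
	if (!grown) {
		__release_region(parent, alloc->start, resource_size(alloc));
		return -ENOMEM;
	}
	grown[(*nr)++] = alloc;
	*vec = grown;
	return 0;
}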
875 int last_range = dev_dax->nr_range - 1; in adjust_dev_dax_range()
876 struct dev_dax_range *dax_range = &dev_dax->ranges[last_range]; in adjust_dev_dax_range()
877 struct dax_region *dax_region = dev_dax->region; in adjust_dev_dax_range()
879 struct range *range = &dax_range->range; in adjust_dev_dax_range()
880 struct device *dev = &dev_dax->dev; in adjust_dev_dax_range()
883 device_lock_assert(dax_region->dev); in adjust_dev_dax_range()
886 return -EINVAL; in adjust_dev_dax_range()
888 rc = adjust_resource(res, range->start, size); in adjust_dev_dax_range()
893 .start = range->start, in adjust_dev_dax_range()
894 .end = range->start + size - 1, in adjust_dev_dax_range()
898 last_range, (unsigned long long) range->start, in adjust_dev_dax_range()
899 (unsigned long long) range->end); in adjust_dev_dax_range()
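adjust_dev_dax_range() resizes the last allocation in place: adjust_resource() moves the end of the kernel resource, and the driver's shadow struct range is rewritten to match. The core two steps, with the error check between lines 888 and 893 filled in:

rc = adjust_resource(res, range->start, size);
if (rc)
	return rc;

*range = (struct range) {
	.start = range->start,
	.end = range->start + size - 1,
};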
923 return IS_ALIGNED(size, max_t(unsigned long, dev_dax->align, memremap_compat_align())); in alloc_is_aligned()
928 resource_size_t to_shrink = dev_dax_size(dev_dax) - size; in dev_dax_shrink()
929 struct dax_region *dax_region = dev_dax->region; in dev_dax_shrink()
930 struct device *dev = &dev_dax->dev; in dev_dax_shrink()
933 for (i = dev_dax->nr_range - 1; i >= 0; i--) { in dev_dax_shrink()
934 struct range *range = &dev_dax->ranges[i].range; in dev_dax_shrink()
935 struct dax_mapping *mapping = dev_dax->ranges[i].mapping; in dev_dax_shrink()
941 devm_release_action(dax_region->dev, in dev_dax_shrink()
942 unregister_dax_mapping, &mapping->dev); in dev_dax_shrink()
944 to_shrink -= shrink; in dev_dax_shrink()
951 if (strcmp(res->name, dev_name(dev)) == 0 in dev_dax_shrink()
952 && res->start == range->start) { in dev_dax_shrink()
957 if (dev_WARN_ONCE(dev, !adjust || i != dev_dax->nr_range - 1, in dev_dax_shrink()
959 return -ENXIO; in dev_dax_shrink()
961 - shrink); in dev_dax_shrink()
968 * allocations. I.e. the dev_dax->ranges array is ordered by increasing pgoff.
975 if (dev_dax->nr_range == 0) in adjust_ok()
977 if (strcmp(res->name, dev_name(&dev_dax->dev)) != 0) in adjust_ok()
979 last = &dev_dax->ranges[dev_dax->nr_range - 1]; in adjust_ok()
980 if (last->range.start != res->start || last->range.end != res->end) in adjust_ok()
982 for (i = 0; i < dev_dax->nr_range - 1; i++) { in adjust_ok()
983 struct dev_dax_range *dax_range = &dev_dax->ranges[i]; in adjust_ok()
985 if (dax_range->pgoff > last->pgoff) in adjust_ok()
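adjust_ok() reassembles cleanly from the matched lines: in-place extension is only legal when the candidate resource belongs to this device, is the device's last range, and no earlier range has a higher pgoff, because the comment at line 968 requires ranges[] to stay ordered by increasing pgoff:

static bool adjust_ok(struct dev_dax *dev_dax, struct resource *res)
{
	struct dev_dax_range *last;
	int i;

	if (dev_dax->nr_range == 0)
		return false;
	if (strcmp(res->name, dev_name(&dev_dax->dev)) != 0)
		return false;
	last = &dev_dax->ranges[dev_dax->nr_range - 1];
	if (last->range.start != res->start || last->range.end != res->end)
		return false;
	for (i = 0; i < dev_dax->nr_range - 1; i++)
		if (dev_dax->ranges[i].pgoff > last->pgoff)
			return false;
	return true;
}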
997 struct resource *region_res = &dax_region->res; in dev_dax_resize()
998 struct device *dev = &dev_dax->dev; in dev_dax_resize()
1000 resource_size_t alloc = 0; in dev_dax_resize() local
1003 if (dev->driver) in dev_dax_resize()
1004 return -EBUSY; in dev_dax_resize()
1007 if (size > dev_size && size - dev_size > avail) in dev_dax_resize()
1008 return -ENOSPC; in dev_dax_resize()
1012 to_alloc = size - dev_size; in dev_dax_resize()
1015 return -ENXIO; in dev_dax_resize()
1023 first = region_res->child; in dev_dax_resize()
1025 return alloc_dev_dax_range(dev_dax, dax_region->res.start, to_alloc); in dev_dax_resize()
1027 rc = -ENOSPC; in dev_dax_resize()
1028 for (res = first; res; res = res->sibling) { in dev_dax_resize()
1029 struct resource *next = res->sibling; in dev_dax_resize()
1032 if (res == first && res->start > dax_region->res.start) { in dev_dax_resize()
1033 alloc = min(res->start - dax_region->res.start, to_alloc); in dev_dax_resize()
1034 rc = alloc_dev_dax_range(dev_dax, dax_region->res.start, alloc); in dev_dax_resize()
1038 alloc = 0; in dev_dax_resize()
1040 if (next && next->start > res->end + 1) in dev_dax_resize()
1041 alloc = min(next->start - (res->end + 1), to_alloc); in dev_dax_resize()
1044 if (!alloc && !next && res->end < region_res->end) in dev_dax_resize()
1045 alloc = min(region_res->end - res->end, to_alloc); in dev_dax_resize()
1047 if (!alloc) in dev_dax_resize()
1051 rc = adjust_dev_dax_range(dev_dax, res, resource_size(res) + alloc); in dev_dax_resize()
1054 rc = alloc_dev_dax_range(dev_dax, res->end + 1, alloc); in dev_dax_resize()
1059 to_alloc -= alloc; in dev_dax_resize()
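The resize loop is a first-fit walk over the region's child resources, probing three gap candidates per step: before the first allocation, between res and its next sibling, and after the last one. When adjust_ok() approves, an abutting range is extended in place; otherwise a new range starts in the gap. Reassembled from the matched lines, with the break/continue placement inferred from the rc and alloc handling shown:

rc = -ENOSPC;
for (res = first; res; res = res->sibling) {
	struct resource *next = res->sibling;

	/* space before the first allocation in the region */
	if (res == first && res->start > dax_region->res.start) {
		alloc = min(res->start - dax_region->res.start, to_alloc);
		rc = alloc_dev_dax_range(dev_dax, dax_region->res.start, alloc);
		break;
	}

	alloc = 0;
	/* space between this allocation and the next sibling */
	if (next && next->start > res->end + 1)
		alloc = min(next->start - (res->end + 1), to_alloc);

	/* space after the last allocation, up to the region end */
	if (!alloc && !next && res->end < region_res->end)
		alloc = min(region_res->end - res->end, to_alloc);

	if (!alloc)
		continue;

	if (adjust_ok(dev_dax, res)) {
		rc = adjust_dev_dax_range(dev_dax, res,
					  resource_size(res) + alloc);
		break;
	}
	rc = alloc_dev_dax_range(dev_dax, res->end + 1, alloc);
	break;
}

The to_alloc -= alloc at line 1059 then lets the caller iterate until the whole request is satisfied or rc reports failure.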
1071 struct dax_region *dax_region = dev_dax->region; in size_store()
1079 return -EINVAL; in size_store()
1082 device_lock(dax_region->dev); in size_store()
1083 if (!dax_region->dev->driver) { in size_store()
1084 device_unlock(dax_region->dev); in size_store()
1085 return -ENXIO; in size_store()
1090 device_unlock(dax_region->dev); in size_store()
1100 ssize_t rc = -EINVAL; in range_parse()
1107 start = strsep(&end, "-"); in range_parse()
1114 range->start = addr; in range_parse()
1119 range->end = addr; in range_parse()
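range_parse() splits a "start-end" string with strsep() and converts both halves. The matched lines show the split and the two assignments; the kstrdup() of the input and the kstrtoull() conversions below are filled in as the likely plumbing, with base 16 assumed to match the %#llx formatting of the start/end attributes:

static ssize_t range_parse(const char *opt, size_t len, struct range *range)
{
	unsigned long long addr = 0;
	char *start, *end, *str;
	ssize_t rc = -EINVAL;

	str = kstrdup(opt, GFP_KERNEL);
	if (!str)
		return -ENOMEM;

	end = str;
	start = strsep(&end, "-");
	if (!start || !end)
		goto err;

	rc = kstrtoull(start, 16, &addr);
	if (rc)
		goto err;
	range->start = addr;

	rc = kstrtoull(end, 16, &addr);
	if (rc)
		goto err;
	range->end = addr;

err:
	kfree(str);
	return rc;
}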
1130 struct dax_region *dax_region = dev_dax->region; in mapping_store()
1139 rc = -ENXIO; in mapping_store()
1140 device_lock(dax_region->dev); in mapping_store()
1141 if (!dax_region->dev->driver) { in mapping_store()
1142 device_unlock(dax_region->dev); in mapping_store()
1151 device_unlock(dax_region->dev); in mapping_store()
1162 return sprintf(buf, "%d\n", dev_dax->align); in align_show()
1167 struct device *dev = &dev_dax->dev; in dev_dax_validate_align()
1170 for (i = 0; i < dev_dax->nr_range; i++) { in dev_dax_validate_align()
1171 size_t len = range_len(&dev_dax->ranges[i].range); in dev_dax_validate_align()
1175 __func__, dev_dax->align, i); in dev_dax_validate_align()
1176 return -EINVAL; in dev_dax_validate_align()
1187 struct dax_region *dax_region = dev_dax->region; in align_store()
1193 return -ENXIO; in align_store()
1196 return -EINVAL; in align_store()
1198 device_lock(dax_region->dev); in align_store()
1199 if (!dax_region->dev->driver) { in align_store()
1200 device_unlock(dax_region->dev); in align_store()
1201 return -ENXIO; in align_store()
1205 if (dev->driver) { in align_store()
1206 rc = -EBUSY; in align_store()
1210 align_save = dev_dax->align; in align_store()
1211 dev_dax->align = val; in align_store()
1214 dev_dax->align = align_save; in align_store()
1217 device_unlock(dax_region->dev); in align_store()
1224 struct dax_region *dax_region = dev_dax->region; in dev_dax_target_node()
1226 return dax_region->target_node; in dev_dax_target_node()
1242 struct dax_region *dax_region = dev_dax->region; in resource_show()
1245 if (dev_dax->nr_range < 1) in resource_show()
1246 start = dax_region->res.start; in resource_show()
1248 start = dev_dax->ranges[0].range.start; in resource_show()
1258 * We only ever expect to handle device-dax instances, i.e. the in modalias_show()
1276 struct dax_region *dax_region = dev_dax->region; in dev_dax_visible()
1287 return a->mode; in dev_dax_visible()
1314 struct dax_device *dax_dev = dev_dax->dax_dev; in dev_dax_release()
1318 kfree(dev_dax->pgmap); in dev_dax_release()
1329 struct dax_region *dax_region = data->dax_region; in devm_create_dev_dax()
1330 struct device *parent = dax_region->dev; in devm_create_dev_dax()
1339 return ERR_PTR(-ENOMEM); in devm_create_dev_dax()
1341 dev_dax->region = dax_region; in devm_create_dev_dax()
1343 if (dev_WARN_ONCE(parent, data->id < 0, in devm_create_dev_dax()
1345 rc = -EINVAL; in devm_create_dev_dax()
1349 dev_dax->id = data->id; in devm_create_dev_dax()
1351 if (dev_WARN_ONCE(parent, data->id >= 0, in devm_create_dev_dax()
1353 rc = -EINVAL; in devm_create_dev_dax()
1362 dev = &dev_dax->dev; in devm_create_dev_dax()
1364 dev_set_name(dev, "dax%d.%d", dax_region->id, dev_dax->id); in devm_create_dev_dax()
1366 rc = alloc_dev_dax_range(dev_dax, dax_region->res.start, data->size); in devm_create_dev_dax()
1370 if (data->pgmap) { in devm_create_dev_dax()
1374 dev_dax->pgmap = kmemdup(data->pgmap, in devm_create_dev_dax()
1376 if (!dev_dax->pgmap) { in devm_create_dev_dax()
1377 rc = -ENOMEM; in devm_create_dev_dax()
1398 dev_dax->dax_dev = dax_dev; in devm_create_dev_dax()
1399 dev_dax->target_node = dax_region->target_node; in devm_create_dev_dax()
1400 dev_dax->align = dax_region->align; in devm_create_dev_dax()
1401 ida_init(&dev_dax->ida); in devm_create_dev_dax()
1404 dev->devt = inode->i_rdev; in devm_create_dev_dax()
1405 dev->bus = &dax_bus_type; in devm_create_dev_dax()
1406 dev->parent = parent; in devm_create_dev_dax()
1407 dev->type = &dev_dax_type; in devm_create_dev_dax()
1416 rc = devm_add_action_or_reset(dax_region->dev, unregister_dev_dax, dev); in devm_create_dev_dax()
1421 if (dev_dax->nr_range && range_len(&dev_dax->ranges[0].range)) { in devm_create_dev_dax()
1430 kfree(dev_dax->pgmap); in devm_create_dev_dax()
1445 struct device_driver *drv = &dax_drv->drv; in __dax_driver_register()
1448 * dax_bus_probe() calls dax_drv->probe() unconditionally. in __dax_driver_register()
1451 if (!dax_drv->probe) in __dax_driver_register()
1452 return -EINVAL; in __dax_driver_register()
1454 INIT_LIST_HEAD(&dax_drv->ids); in __dax_driver_register()
1455 drv->owner = module; in __dax_driver_register()
1456 drv->name = mod_name; in __dax_driver_register()
1457 drv->mod_name = mod_name; in __dax_driver_register()
1458 drv->bus = &dax_bus_type; in __dax_driver_register()
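__dax_driver_register() rejects drivers without a probe() hook (line 1451), initializes the dynamic-id list, and fills the embedded struct device_driver before handing it to the driver core. A sketch of a minimal client, assuming the dax_driver_register() convenience macro from bus.h wraps __dax_driver_register(drv, THIS_MODULE, KBUILD_MODNAME); all my_dax_* names are hypothetical:

static int my_dax_probe(struct dev_dax *dev_dax)
{
	return 0;	/* hypothetical no-op claim of the device */
}

static struct dax_device_driver my_dax_driver = {
	.probe = my_dax_probe,	/* mandatory, per the -EINVAL check above */
	/* .remove is optional; dax_bus_remove() checks for NULL (line 221) */
};

static int __init my_dax_init(void)
{
	return dax_driver_register(&my_dax_driver);
}
module_init(my_dax_init);

static void __exit my_dax_exit(void)
{
	dax_driver_unregister(&my_dax_driver);
}
module_exit(my_dax_exit);
MODULE_LICENSE("GPL");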
1466 struct device_driver *drv = &dax_drv->drv; in dax_driver_unregister()
1470 list_for_each_entry_safe(dax_id, _id, &dax_drv->ids, list) { in dax_driver_unregister()
1471 list_del(&dax_id->list); in dax_driver_unregister()