Lines matching "+full:container +full:- +full:rules" in drivers/nvdimm/dimm_devs.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include "nd-core.h"
/* in nvdimm_check_config_data() */
if (!nvdimm->cmd_mask ||
    !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
        if (test_bit(NDD_LABELING, &nvdimm->flags))
                return -ENXIO;
        else
                return -ENOTTY;
}
/* in validate_dimm() */
if (!ndd)
        return -EINVAL;
rc = nvdimm_check_config_data(ndd->dev);
if (rc)
        dev_dbg(ndd->dev, "%ps: %s error: %d\n",
                        __builtin_return_address(0), __func__, rc);
/*
 * nvdimm_init_nsarea() - determine the geometry of a dimm's namespace area
 */
struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);

if (cmd->config_size)
        return 0;       /* already valid */

nd_desc = nvdimm_bus->nd_desc;
rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
                ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), &cmd_rc);
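The early return above caches the ND_CMD_GET_CONFIG_SIZE result: once cmd->config_size is non-zero, the DIMM has already been probed. A minimal userspace sketch of that probe-once pattern (all names here are illustrative; query_hw() is a hypothetical stand-in for the nd_desc->ndctl() round trip):

#include <stdio.h>

struct cfg_size { unsigned int config_size, max_xfer; };

static int query_hw(struct cfg_size *out)
{
        out->config_size = 128 * 1024;  /* pretend the DIMM reports 128K */
        out->max_xfer = 4096;
        return 0;
}

static int init_nsarea(struct cfg_size *cached)
{
        if (cached->config_size)        /* already valid, skip the round trip */
                return 0;
        return query_hw(cached);
}

int main(void)
{
        struct cfg_size nsarea = { 0 };

        init_nsarea(&nsarea);           /* first call queries the "hardware" */
        init_nsarea(&nsarea);           /* second call is a no-op */
        printf("config_size=%u max_xfer=%u\n", nsarea.config_size, nsarea.max_xfer);
        return 0;
}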
/* in nvdimm_get_config_data() */
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

if (offset + len > ndd->nsarea.config_size)
        return -ENXIO;

max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
cmd = kvzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
if (!cmd)
        return -ENOMEM;

for (buf_offset = 0; len;
                len -= cmd->in_length, buf_offset += cmd->in_length) {
        cmd->in_offset = offset + buf_offset;
        cmd->in_length = min(max_cmd_size, len);
        cmd_size = sizeof(*cmd) + cmd->in_length;
        rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
                        ND_CMD_GET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
        /* ... check rc/cmd_rc ... */
        memcpy(buf + buf_offset, cmd->out_buf, cmd->in_length);
}
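The loop above is a bounded-transfer pattern: a config area larger than the DIMM's max_xfer is read in max_xfer-sized chunks, with offset and buf_offset advancing in lockstep. A self-contained userspace model of the same bookkeeping (read_chunk() is a hypothetical stand-in for the ND_CMD_GET_CONFIG_DATA call):

#include <stdio.h>
#include <string.h>

#define CFG_SIZE        1024u
static unsigned char config[CFG_SIZE];  /* pretend DIMM config area */

static void read_chunk(unsigned int off, unsigned char *dst, unsigned int n)
{
        memcpy(dst, config + off, n);
}

static int get_config_data(unsigned char *buf, unsigned int offset,
                           unsigned int len, unsigned int max_xfer)
{
        unsigned int buf_offset, in_length;

        if (offset + len > CFG_SIZE)
                return -1;      /* -ENXIO in the kernel */
        for (buf_offset = 0; len; len -= in_length, buf_offset += in_length) {
                in_length = len < max_xfer ? len : max_xfer;
                read_chunk(offset + buf_offset, buf + buf_offset, in_length);
        }
        return 0;
}

int main(void)
{
        unsigned char out[300];

        memset(config, 0x5a, sizeof(config));
        if (get_config_data(out, 100, sizeof(out), 128) == 0)
                printf("read %zu bytes in 128-byte chunks\n", sizeof(out));
        return 0;
}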
/* in nvdimm_set_config_data() */
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

if (offset + len > ndd->nsarea.config_size)
        return -ENXIO;

max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
cmd = kvzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
if (!cmd)
        return -ENOMEM;

for (buf_offset = 0; len; len -= cmd->in_length,
                buf_offset += cmd->in_length) {
        cmd->in_offset = offset + buf_offset;
        cmd->in_length = min(max_cmd_size, len);
        memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);

        /* status is output in the last 4-bytes of the command buffer */
        cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);

        rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
                        ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
        /* ... check rc/cmd_rc and the trailing status ... */
}
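Per the comment above, the set-config command buffer carries a variable-length payload followed by a 4-byte status word, hence cmd_size = sizeof(*cmd) + in_length + sizeof(u32). A userspace sketch of that layout (struct and field names are illustrative, not the kernel's header):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct set_cfg_hdr {
        uint32_t in_offset;
        uint32_t in_length;
        uint8_t in_buf[];       /* payload, then a trailing u32 status */
};

int main(void)
{
        uint32_t in_length = 64;
        size_t cmd_size = sizeof(struct set_cfg_hdr) + in_length + sizeof(uint32_t);
        struct set_cfg_hdr *cmd = calloc(1, cmd_size);
        uint32_t status;

        if (!cmd)
                return 1;
        cmd->in_length = in_length;
        memset(cmd->in_buf, 0xab, in_length);

        /* the "firmware" writes status into the last 4 bytes of the buffer */
        memcpy(&status, cmd->in_buf + in_length, sizeof(status));
        printf("cmd_size=%zu status=%u\n", cmd_size, status);
        free(cmd);
        return 0;
}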
set_bit(NDD_LABELING, &nvdimm->flags);          /* in nvdimm_set_labeling() */
set_bit(NDD_LOCKED, &nvdimm->flags);            /* in nvdimm_set_locked() */
clear_bit(NDD_LOCKED, &nvdimm->flags);          /* in nvdimm_clear_locked() */
ida_simple_remove(&dimm_ida, nvdimm->id);       /* in nvdimm_release() */
/* in nd_blk_region_to_dimm() */
struct nd_region *nd_region = &ndbr->nd_region;
struct nd_mapping *nd_mapping = &nd_region->mapping[0];

return nd_mapping->nvdimm;
/* in to_ndd() */
struct nvdimm *nvdimm = nd_mapping->nvdimm;

WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

return dev_get_drvdata(&nvdimm->dev);
/* in nvdimm_drvdata_release() */
struct device *dev = ndd->dev;
/* ... */
kvfree(ndd->data);

kref_get(&ndd->kref);                           /* in get_ndd() */
kref_put(&ndd->kref, nvdimm_drvdata_release);   /* in put_ndd() */
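get_ndd()/put_ndd() are a thin kref pairing: the drvdata lives until the last put, at which point kref_put() invokes nvdimm_drvdata_release(). A rough userspace analogue using C11 atomics (simplified; the kernel's kref additionally provides saturation and memory-ordering guarantees):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct drvdata {
        atomic_int kref;
        /* ... payload ... */
};

static void drvdata_release(struct drvdata *d)
{
        printf("releasing\n");
        free(d);
}

static void get_d(struct drvdata *d) { atomic_fetch_add(&d->kref, 1); }

static void put_d(struct drvdata *d)
{
        if (atomic_fetch_sub(&d->kref, 1) == 1)  /* count hit zero */
                drvdata_release(d);
}

int main(void)
{
        struct drvdata *d = malloc(sizeof(*d));

        if (!d)
                return 1;
        atomic_init(&d->kref, 1);       /* creation holds one reference */
        get_d(d);
        put_d(d);       /* still one reference held */
        put_d(d);       /* last put frees */
        return 0;
}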
return dev_name(&nvdimm->dev);          /* in nvdimm_name() */
return &nvdimm->dev.kobj;               /* in nvdimm_kobj() */
return nvdimm->cmd_mask;                /* in nvdimm_cmd_mask() */
return nvdimm->provider_data;           /* in nvdimm_provider_data() */
/* in commands_show() */
if (!nvdimm->cmd_mask)
        return sprintf(buf, "\n");
for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG)
        len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));

/* in flags_show() */
return sprintf(buf, "%s%s%s\n",
                test_bit(NDD_ALIASING, &nvdimm->flags) ? "alias " : "",
                test_bit(NDD_LABELING, &nvdimm->flags) ? "label " : "",
                test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : "");

/* in state_show() */
return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
                ? "active" : "idle");
/* in __available_slots_show() */
if (!ndd)
        return -ENXIO;
dev = ndd->dev;
nfree = nd_label_nfree(ndd);
if (nfree - 1 > nfree) {        /* nfree == 0: the decrement would wrap */
        nfree = 0;
} else
        nfree--;
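The `nfree - 1 > nfree` test above is an unsigned-underflow guard: for a u32 it can only be true when nfree is 0 and the subtraction wraps around. A compilable demonstration:

#include <stdio.h>

int main(void)
{
        for (unsigned int nfree = 2; ; nfree--) {
                if (nfree - 1 > nfree) {        /* true only when nfree == 0 */
                        printf("nfree=%u: decrement would wrap, clamp to 0\n", nfree);
                        break;
                }
                printf("nfree=%u -> %u\n", nfree, nfree - 1);
        }
        return 0;
}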
/* in security_show() */
if (test_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags))
        return sprintf(buf, "overwrite\n");
if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
        return sprintf(buf, "disabled\n");
if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags))
        return sprintf(buf, "unlocked\n");
if (test_bit(NVDIMM_SECURITY_LOCKED, &nvdimm->sec.flags))
        return sprintf(buf, "locked\n");
return -ENOTTY;

/* in frozen_show() */
return sprintf(buf, "%d\n",
                test_bit(NVDIMM_SECURITY_FROZEN, &nvdimm->sec.flags));
/* in nvdimm_visible() */
if (a != &dev_attr_security.attr && a != &dev_attr_frozen.attr)
        return a->mode;
if (!nvdimm->sec.flags)
        return 0;

if (a == &dev_attr_security.attr) {
        /* Are there any state mutation ops (make writable)? */
        if (nvdimm->sec.ops->freeze || nvdimm->sec.ops->disable
                        || nvdimm->sec.ops->change_key
                        || nvdimm->sec.ops->erase
                        || nvdimm->sec.ops->overwrite)
                return a->mode;
        return 0444;
}

if (nvdimm->sec.ops->freeze)
        return a->mode;
return 0;
/* in result_show() */
if (!nvdimm->fw_ops)
        return -EOPNOTSUPP;
result = nvdimm->fw_ops->activate_result(nvdimm);
switch (result) {
/* ... per-result strings ... */
default:
        return -ENXIO;
}
/* in activate_show() */
if (!nvdimm->fw_ops)
        return -EOPNOTSUPP;
state = nvdimm->fw_ops->activate_state(nvdimm);
switch (state) {
/* ... "idle", "busy", "armed" ... */
default:
        return -ENXIO;
}
/* in activate_store() */
if (!nvdimm->fw_ops)
        return -EOPNOTSUPP;
if (sysfs_streq(buf, "arm"))
        arg = NVDIMM_FWA_ARM;
else if (sysfs_streq(buf, "disarm"))
        arg = NVDIMM_FWA_DISARM;
else
        return -EINVAL;
rc = nvdimm->fw_ops->arm(nvdimm, arg);
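sysfs_streq() is used above instead of strcmp() because a value written via `echo arm > activate` arrives with a trailing newline. A userspace sketch of an equivalent newline-tolerant compare (streq_nl() is a hypothetical helper, not a kernel API):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool streq_nl(const char *s, const char *kw)
{
        size_t n = strlen(kw);

        if (strncmp(s, kw, n) != 0)
                return false;
        /* accept either an exact match or one trailing newline */
        return s[n] == '\0' || (s[n] == '\n' && s[n + 1] == '\0');
}

int main(void)
{
        const char *buf = "arm\n";      /* what `echo arm > activate` delivers */

        if (streq_nl(buf, "arm"))
                puts("NVDIMM_FWA_ARM");
        else if (streq_nl(buf, "disarm"))
                puts("NVDIMM_FWA_DISARM");
        else
                puts("-EINVAL");
        return 0;
}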
/* in nvdimm_firmware_visible() */
struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

if (!nd_desc->fw_ops)
        return 0;
if (!nvdimm->fw_ops)
        return 0;

cap = nd_desc->fw_ops->capability(nd_desc);
if (cap < NVDIMM_FWA_CAP_QUIESCE)
        return 0;
return a->mode;
return dev->type == &nvdimm_device_type;        /* in is_nvdimm() */
/* in __nvdimm_create() */
nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
if (nvdimm->id < 0) {
        kfree(nvdimm);
        return NULL;
}

nvdimm->dimm_id = dimm_id;
nvdimm->provider_data = provider_data;
nvdimm->flags = flags;
nvdimm->cmd_mask = cmd_mask;
nvdimm->num_flush = num_flush;
nvdimm->flush_wpq = flush_wpq;
atomic_set(&nvdimm->busy, 0);
dev = &nvdimm->dev;
dev_set_name(dev, "nmem%d", nvdimm->id);
dev->parent = &nvdimm_bus->dev;
dev->type = &nvdimm_device_type;
dev->devt = MKDEV(nvdimm_major, nvdimm->id);
dev->groups = groups;
nvdimm->sec.ops = sec_ops;
nvdimm->fw_ops = fw_ops;
nvdimm->sec.overwrite_tmo = 0;
INIT_DELAYED_WORK(&nvdimm->dwork, nvdimm_security_overwrite_query);
/* ... register the device ... */
nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
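The creation path above allocates the lowest free DIMM id and derives the device name nmem%d from it. A minimal userspace sketch of that step, with a bitmap standing in for ida_simple_get() (sizes and names are illustrative):

#include <stdio.h>

#define MAX_DIMMS 8
static unsigned char used[MAX_DIMMS];

static int dimm_ida_get(void)
{
        for (int i = 0; i < MAX_DIMMS; i++)
                if (!used[i]) {
                        used[i] = 1;    /* claim the lowest free id */
                        return i;
                }
        return -1;
}

int main(void)
{
        char name[16];
        int id = dimm_ida_get();

        if (id < 0)
                return 1;
        snprintf(name, sizeof(name), "nmem%d", id);
        printf("registered %s\n", name);        /* e.g. nmem0 */
        return 0;
}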
sysfs_put(nvdimm->sec.overwrite_state);         /* in shutdown_security_notify() */
/* in nvdimm_security_setup_events() */
if (!nvdimm->sec.flags || !nvdimm->sec.ops
                || !nvdimm->sec.ops->overwrite)
        return 0;
nvdimm->sec.overwrite_state = sysfs_get_dirent(dev->kobj.sd, "security");
if (!nvdimm->sec.overwrite_state)
        return -ENOMEM;
return test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);        /* in nvdimm_in_overwrite() */
/* in nvdimm_security_freeze() */
WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

if (!nvdimm->sec.ops || !nvdimm->sec.ops->freeze)
        return -EOPNOTSUPP;

if (!nvdimm->sec.flags)
        return -EIO;

if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
        dev_warn(&nvdimm->dev, "Overwrite operation in progress.\n");
        return -EBUSY;
}

rc = nvdimm->sec.ops->freeze(nvdimm);
nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
/* in dpa_align() */
struct device *dev = &nd_region->dev;

if (dev_WARN_ONCE(dev, !nd_region->ndr_mappings || nd_region->align
                        % nd_region->ndr_mappings,
                        "invalid region align %#lx mappings: %d\n",
                        nd_region->align, nd_region->ndr_mappings))
        return 0;
return nd_region->align / nd_region->ndr_mappings;
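dpa_align() divides the region alignment evenly across the members of the interleave set, after checking that it divides cleanly; e.g. a 16M region alignment across a 4-way interleave yields a 4M per-DIMM granule. A worked example with illustrative numbers:

#include <stdio.h>

int main(void)
{
        unsigned long region_align = 0x1000000; /* 16M region alignment */
        int ndr_mappings = 4;                   /* 4-way interleave */

        if (!ndr_mappings || region_align % ndr_mappings) {
                fprintf(stderr, "invalid region align\n");
                return 1;
        }
        /* each DIMM contributes align/mappings bytes per interleave stripe */
        printf("per-dimm align: %#lx\n", region_align / ndr_mappings);
        return 0;
}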
/* in alias_dpa_busy() */
for (i = 0; i < nd_region->ndr_mappings; i++) {
        nd_mapping = &nd_region->mapping[i];
        if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
                break;
}
if (i >= nd_region->ndr_mappings)
        return 0;

map_end = nd_mapping->start + nd_mapping->size - 1;
blk_start = nd_mapping->start;

/*
 * In the allocation case ->res is set to free space that we are
 * looking to validate against PMEM aliasing collision rules.
 */
if (info->res) {
        if (info->res->start < nd_mapping->start
                        || info->res->start >= map_end)
                return 0;
}

/*
 * Find the free dpa from the end of the last pmem allocation to
 * the end of the interleave-set mapping.
 */
for_each_dpa_resource(ndd, res) {
        if (strncmp(res->name, "pmem", 4) != 0)
                continue;
        start = ALIGN_DOWN(res->start, align);
        end = ALIGN(res->end + 1, align) - 1;
        /* ... bump blk_start past any aliased pmem allocation ... */
}

/* update the free space range with the probed blk_start */
if (info->res && blk_start > info->res->start) {
        info->res->start = max(info->res->start, blk_start);
        if (info->res->start > info->res->end)
                info->res->end = info->res->start - 1;
        return 1;
}

info->available -= blk_start - nd_mapping->start;
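The ALIGN_DOWN()/ALIGN() pair above widens a busy pmem range outward to granule boundaries before comparing it against the free window, so a partially covered granule counts as fully busy. A compilable demonstration using simplified power-of-two variants of the kernel macros:

#include <stdio.h>

#define ALIGN_DOWN(x, a)        ((x) & ~((a) - 1))
#define ALIGN(x, a)             ALIGN_DOWN((x) + (a) - 1, a)

int main(void)
{
        unsigned long long align = 0x400000;    /* 4M granule */
        unsigned long long res_start = 0x500000, res_end = 0x8fffff;

        unsigned long long start = ALIGN_DOWN(res_start, align);
        unsigned long long end = ALIGN(res_end + 1, align) - 1;

        /* [0x500000, 0x8fffff] widens to [0x400000, 0xbfffff] */
        printf("busy: [%#llx, %#llx]\n", start, end);
        return 0;
}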
/*
 * nd_blk_available_dpa() - account the unused dpa of BLK region
 * @nd_mapping: container of dpa-resource-root + labels
 */
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
struct nd_mapping *nd_mapping = &nd_region->mapping[0];
struct blk_alloc_info info = {
        .nd_mapping = nd_mapping,
        .available = nd_mapping->size,
};

device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);

/* now account for busy blk allocations in unaliased dpa */
for_each_dpa_resource(ndd, res) {
        if (strncmp(res->name, "blk", 3) != 0)
                continue;
        start = ALIGN_DOWN(res->start, align);
        end = ALIGN(res->end + 1, align) - 1;
        size = end - start + 1;
        if (size >= info.available)
                return 0;
        info.available -= size;
}
/*
 * nd_pmem_max_contiguous_dpa() - for the given dimm+region, return the max
 *                                contiguous unallocated dpa range
 * @nd_mapping: container of dpa-resource-root + labels
 */
nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
if (__reserve_free_pmem(&nd_region->dev, nd_mapping->nvdimm))
        return 0;
for_each_dpa_resource(ndd, res) {
        if (strcmp(res->name, "pmem-reserve") != 0)
                continue;
        /* trim free space relative to current alignment setting */
        start = ALIGN(res->start, align);
        end = ALIGN_DOWN(res->end + 1, align) - 1;
        if (end < start)
                continue;
        if (end - start + 1 > max)
                max = end - start + 1;
}
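Free ranges are trimmed in the opposite direction: ALIGN() rounds the start up and ALIGN_DOWN() rounds the end down, so only whole granules are counted, and a fragment smaller than one granule trims to nothing. A sketch of the max-contiguous scan with illustrative numbers (the "pmem-reserve" resources are modeled as a plain array):

#include <stdio.h>

#define ALIGN_DOWN(x, a)        ((x) & ~((a) - 1))
#define ALIGN(x, a)             ALIGN_DOWN((x) + (a) - 1, a)

struct range { unsigned long long start, end; };

int main(void)
{
        struct range free_pmem[] = {    /* stand-ins for "pmem-reserve" resources */
                { 0x4100000, 0x42fffff },       /* smaller than one granule */
                { 0x900000, 0x23fffff },        /* spans several granules */
        };
        unsigned long long align = 0x400000, max = 0;

        for (int i = 0; i < 2; i++) {
                unsigned long long start = ALIGN(free_pmem[i].start, align);
                unsigned long long end = ALIGN_DOWN(free_pmem[i].end + 1, align) - 1;

                if (end < start)        /* trimmed away entirely */
                        continue;
                if (end - start + 1 > max)
                        max = end - start + 1;
        }
        printf("max contiguous: %#llx\n", max); /* 0x1800000 (24M) */
        return 0;
}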
/*
 * nd_pmem_available_dpa() - for the given dimm+region account unallocated dpa
 * @nd_mapping: container of dpa-resource-root + labels
 *
 * This routine is called multiple times to probe for the largest BLK
 * encroachment for any single member DIMM of the interleave set. Once that
 * value is determined the PMEM-limit for the set can be established.
 */
map_start = nd_mapping->start;
map_end = map_start + nd_mapping->size - 1;
blk_start = max(map_start, map_end + 1 - *overlap);
for_each_dpa_resource(ndd, res) {
        start = ALIGN_DOWN(res->start, align);
        end = ALIGN(res->end + 1, align) - 1;
        if (start >= map_start && start < map_end) {
                if (strncmp(res->name, "blk", 3) == 0)
                        blk_start = min(blk_start, max(map_start, start));
                else
                        busy += end - start + 1;
        } else if (end >= map_start && end <= map_end) {
                if (strncmp(res->name, "blk", 3) == 0) {
                        /* BLK overlaps the start of the interleave set */
                        blk_start = map_start;
                } else
                        busy += end - start + 1;
        } else if (map_start > start && map_start < end) {
                /* total eclipse of the mapping */
                busy += nd_mapping->size;
                blk_start = map_start;
        }
}

*overlap = map_end + 1 - blk_start;
available = blk_start - map_start;
if (busy < available)
        return ALIGN_DOWN(available - busy, align);
return 0;
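Worked example of the final bookkeeping: available PMEM runs from the mapping start up to the lowest BLK encroachment (blk_start), the busy total is subtracted, and the result is rounded down to the per-DIMM granule. Numbers below are illustrative:

#include <stdio.h>

#define ALIGN_DOWN(x, a)        ((x) & ~((a) - 1))

int main(void)
{
        unsigned long long map_start = 0x0, map_size = 0x10000000; /* 256M mapping */
        unsigned long long map_end = map_start + map_size - 1;
        unsigned long long blk_start = 0xC000000;  /* first BLK allocation at 192M */
        unsigned long long busy = 0x2000000;       /* 32M already labeled as pmem */
        unsigned long long align = 0x400000;       /* 4M per-dimm granule */

        unsigned long long overlap = map_end + 1 - blk_start;  /* 64M held for BLK */
        unsigned long long available = blk_start - map_start;  /* 192M window */

        if (busy < available)
                printf("overlap=%#llx available=%#llx\n",
                       overlap, ALIGN_DOWN(available - busy, align));
        return 0;
}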
/* in nvdimm_free_dpa() */
WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
kfree(res->name);
__release_region(&ndd->dpa, res->start, resource_size(res));
/* in nvdimm_allocate_dpa() */
WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
res = __request_region(&ndd->dpa, start, n, name, 0);
/*
 * nvdimm_allocated_dpa() - sum up the dpa currently allocated to this label_id
 * @nvdimm: container of dpa-resource-root + labels
 * @label_id: dpa resource name of the form {pmem|blk}-<human readable uuid>
 */
for_each_dpa_resource(ndd, res)
        if (strcmp(res->name, label_id->id) == 0)
                allocated += resource_size(res);
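The walk above sums every dpa resource whose name equals the label id, so one namespace's allocation may be spread across several discontiguous ranges. A userspace sketch with illustrative label ids (the truncated uuids are placeholders):

#include <stdio.h>
#include <string.h>

struct dpa_res {
        const char *name;
        unsigned long long start, end;
};

int main(void)
{
        struct dpa_res dpa[] = {
                { "pmem-5f2c", 0x0,       0x3ffffff },
                { "blk-9a10",  0x4000000, 0x4ffffff },
                { "pmem-5f2c", 0x8000000, 0xbffffff },
        };
        const char *label_id = "pmem-5f2c";
        unsigned long long allocated = 0;

        for (int i = 0; i < 3; i++)
                if (strcmp(dpa[i].name, label_id) == 0)
                        allocated += dpa[i].end - dpa[i].start + 1;
        printf("allocated to %s: %#llx\n", label_id, allocated); /* 0x8000000 */
        return 0;
}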
/* in nvdimm_bus_check_dimm_count() */
device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
dev_dbg(&nvdimm_bus->dev, "count: %d\n", count);
if (count != dimm_count)
        return -ENXIO;