// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "label.h"
#include "pmem.h"
#include "nd.h"

static DEFINE_IDA(dimm_ida);

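/*
 * When set, __nvdimm_create() below forces NDD_NOBLK into the flags of
 * every DIMM registered on the bus, disabling BLK-aperture / local
 * alias support bus-wide.
 */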
static bool noblk;
module_param(noblk, bool, 0444);
MODULE_PARM_DESC(noblk, "force disable BLK / local alias support");

/*
 * Retrieve the bus and dimm handle and return whether this bus supports
 * get_config_data commands.
 */
int nvdimm_check_config_data(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!nvdimm->cmd_mask ||
	    !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
		if (test_bit(NDD_LABELING, &nvdimm->flags))
			return -ENXIO;
		else
			return -ENOTTY;
	}

	return 0;
}

static int validate_dimm(struct nvdimm_drvdata *ndd)
{
	int rc;

	if (!ndd)
		return -EINVAL;

	rc = nvdimm_check_config_data(ndd->dev);
	if (rc)
		dev_dbg(ndd->dev, "%ps: %s error: %d\n",
				__builtin_return_address(0), __func__, rc);
	return rc;
}

/**
 * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
 * @ndd: dimm driver-data to initialize
 */
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
{
	struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc;
	int rc = validate_dimm(ndd);
	int cmd_rc = 0;

	if (rc)
		return rc;

	if (cmd->config_size)
		return 0; /* already valid */

	memset(cmd, 0, sizeof(*cmd));
	nd_desc = nvdimm_bus->nd_desc;
	rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
			ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}

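/*
 * Read the DIMM label/config area into @buf. Transfers are chunked to
 * at most nsarea.max_xfer bytes per ND_CMD_GET_CONFIG_DATA call, the
 * largest payload the bus 'ndctl' backend accepts.
 */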
int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
			   size_t offset, size_t len)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nd_cmd_get_config_data_hdr *cmd;
	size_t max_cmd_size, buf_offset;

	if (rc)
		return rc;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	for (buf_offset = 0; len;
	     len -= cmd->in_length, buf_offset += cmd->in_length) {
		size_t cmd_size;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);

		cmd_size = sizeof(*cmd) + cmd->in_length;

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_GET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}

		/* out_buf should be valid, copy it into our output buffer */
		memcpy(buf + buf_offset, cmd->out_buf, cmd->in_length);
	}
	kvfree(cmd);

	return rc;
}

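/*
 * Write @len bytes from @buf to the DIMM label/config area at @offset,
 * chunked by nsarea.max_xfer. Each command reserves a trailing u32
 * where per-chunk status is reported (returned here via cmd_rc).
 */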
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
		void *buf, size_t len)
{
	size_t max_cmd_size, buf_offset;
	struct nd_cmd_set_config_hdr *cmd;
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	if (rc)
		return rc;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	for (buf_offset = 0; len; len -= cmd->in_length,
			buf_offset += cmd->in_length) {
		size_t cmd_size;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);
		memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);

		/* status is output in the last 4-bytes of the command buffer */
		cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}
	}
	kvfree(cmd);

	return rc;
}

void nvdimm_set_labeling(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_LABELING, &nvdimm->flags);
}

void nvdimm_set_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_LOCKED, &nvdimm->flags);
}

void nvdimm_clear_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	clear_bit(NDD_LOCKED, &nvdimm->flags);
}

static void nvdimm_release(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	ida_simple_remove(&dimm_ida, nvdimm->id);
	kfree(nvdimm);
}

struct nvdimm *to_nvdimm(struct device *dev)
{
	struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);

	WARN_ON(!is_nvdimm(dev));
	return nvdimm;
}
EXPORT_SYMBOL_GPL(to_nvdimm);

struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr)
{
	struct nd_region *nd_region = &ndbr->nd_region;
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];

	return nd_mapping->nvdimm;
}
EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm);

unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr)
{
	/* pmem mapping properties are private to libnvdimm */
	return ARCH_MEMREMAP_PMEM;
}
EXPORT_SYMBOL_GPL(nd_blk_memremap_flags);

struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{
	struct nvdimm *nvdimm = nd_mapping->nvdimm;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	return dev_get_drvdata(&nvdimm->dev);
}
EXPORT_SYMBOL(to_ndd);

void nvdimm_drvdata_release(struct kref *kref)
{
	struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
	struct device *dev = ndd->dev;
	struct resource *res, *_r;

	dev_dbg(dev, "trace\n");
	nvdimm_bus_lock(dev);
	for_each_dpa_resource_safe(ndd, res, _r)
		nvdimm_free_dpa(ndd, res);
	nvdimm_bus_unlock(dev);

	kvfree(ndd->data);
	kfree(ndd);
	put_device(dev);
}

void get_ndd(struct nvdimm_drvdata *ndd)
{
	kref_get(&ndd->kref);
}

void put_ndd(struct nvdimm_drvdata *ndd)
{
	if (ndd)
		kref_put(&ndd->kref, nvdimm_drvdata_release);
}

const char *nvdimm_name(struct nvdimm *nvdimm)
{
	return dev_name(&nvdimm->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_name);

struct kobject *nvdimm_kobj(struct nvdimm *nvdimm)
{
	return &nvdimm->dev.kobj;
}
EXPORT_SYMBOL_GPL(nvdimm_kobj);

unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
{
	return nvdimm->cmd_mask;
}
EXPORT_SYMBOL_GPL(nvdimm_cmd_mask);

void *nvdimm_provider_data(struct nvdimm *nvdimm)
{
	if (nvdimm)
		return nvdimm->provider_data;
	return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_provider_data);

static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	int cmd, len = 0;

	if (!nvdimm->cmd_mask)
		return sprintf(buf, "\n");

	for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR_RO(commands);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%s%s%s\n",
			test_bit(NDD_ALIASING, &nvdimm->flags) ? "alias " : "",
			test_bit(NDD_LABELING, &nvdimm->flags) ? "label " : "",
			test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	/*
	 * The state may be in the process of changing; userspace should
	 * quiesce probing if it wants a static answer.
	 */
	nvdimm_bus_lock(dev);
	nvdimm_bus_unlock(dev);
	return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
			? "active" : "idle");
}
static DEVICE_ATTR_RO(state);

static ssize_t __available_slots_show(struct nvdimm_drvdata *ndd, char *buf)
{
	struct device *dev;
	ssize_t rc;
	u32 nfree;

	if (!ndd)
		return -ENXIO;

	dev = ndd->dev;
	nvdimm_bus_lock(dev);
	nfree = nd_label_nfree(ndd);
	/* nfree == 0 would underflow the reservation decrement below */
	if (nfree - 1 > nfree) {
		dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
		nfree = 0;
	} else
		nfree--;
	rc = sprintf(buf, "%d\n", nfree);
	nvdimm_bus_unlock(dev);
	return rc;
}

static ssize_t available_slots_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	ssize_t rc;

	nd_device_lock(dev);
	rc = __available_slots_show(dev_get_drvdata(dev), buf);
	nd_device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(available_slots);

__weak ssize_t security_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (test_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags))
		return sprintf(buf, "overwrite\n");
	if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
		return sprintf(buf, "disabled\n");
	if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags))
		return sprintf(buf, "unlocked\n");
	if (test_bit(NVDIMM_SECURITY_LOCKED, &nvdimm->sec.flags))
		return sprintf(buf, "locked\n");
	return -ENOTTY;
}

static ssize_t frozen_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%d\n", test_bit(NVDIMM_SECURITY_FROZEN,
				&nvdimm->sec.flags));
}
static DEVICE_ATTR_RO(frozen);

static ssize_t security_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	ssize_t rc;

	/*
	 * Require all userspace-triggered security management to be
	 * done while probing is idle and the DIMM is not in active use
	 * in any region.
	 */
	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = nvdimm_security_store(dev, buf, len);
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RW(security);

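/*
 * Example interaction with the attributes above, assuming the usual
 * libnvdimm sysfs layout and a hypothetical device name:
 *
 *	# cat /sys/bus/nd/devices/nmem0/security
 *	unlocked
 *	# echo freeze > /sys/bus/nd/devices/nmem0/security
 */
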
static struct attribute *nvdimm_attributes[] = {
	&dev_attr_state.attr,
	&dev_attr_flags.attr,
	&dev_attr_commands.attr,
	&dev_attr_available_slots.attr,
	&dev_attr_security.attr,
	&dev_attr_frozen.attr,
	NULL,
};

static umode_t nvdimm_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (a != &dev_attr_security.attr && a != &dev_attr_frozen.attr)
		return a->mode;
	if (!nvdimm->sec.flags)
		return 0;

	if (a == &dev_attr_security.attr) {
		/* Are there any state mutation ops (make writable)? */
		if (nvdimm->sec.ops->freeze || nvdimm->sec.ops->disable
				|| nvdimm->sec.ops->change_key
				|| nvdimm->sec.ops->erase
				|| nvdimm->sec.ops->overwrite)
			return a->mode;
		return 0444;
	}

	if (nvdimm->sec.ops->freeze)
		return a->mode;
	return 0;
}

static const struct attribute_group nvdimm_attribute_group = {
	.attrs = nvdimm_attributes,
	.is_visible = nvdimm_visible,
};

static ssize_t result_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_result result;

	if (!nvdimm->fw_ops)
		return -EOPNOTSUPP;

	nvdimm_bus_lock(dev);
	result = nvdimm->fw_ops->activate_result(nvdimm);
	nvdimm_bus_unlock(dev);

	switch (result) {
	case NVDIMM_FWA_RESULT_NONE:
		return sprintf(buf, "none\n");
	case NVDIMM_FWA_RESULT_SUCCESS:
		return sprintf(buf, "success\n");
	case NVDIMM_FWA_RESULT_FAIL:
		return sprintf(buf, "fail\n");
	case NVDIMM_FWA_RESULT_NOTSTAGED:
		return sprintf(buf, "not_staged\n");
	case NVDIMM_FWA_RESULT_NEEDRESET:
		return sprintf(buf, "need_reset\n");
	default:
		return -ENXIO;
	}
}
static DEVICE_ATTR_ADMIN_RO(result);

static ssize_t activate_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_state state;

	if (!nvdimm->fw_ops)
		return -EOPNOTSUPP;

	nvdimm_bus_lock(dev);
	state = nvdimm->fw_ops->activate_state(nvdimm);
	nvdimm_bus_unlock(dev);

	switch (state) {
	case NVDIMM_FWA_IDLE:
		return sprintf(buf, "idle\n");
	case NVDIMM_FWA_BUSY:
		return sprintf(buf, "busy\n");
	case NVDIMM_FWA_ARMED:
		return sprintf(buf, "armed\n");
	default:
		return -ENXIO;
	}
}

static ssize_t activate_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_trigger arg;
	int rc;

	if (!nvdimm->fw_ops)
		return -EOPNOTSUPP;

	if (sysfs_streq(buf, "arm"))
		arg = NVDIMM_FWA_ARM;
	else if (sysfs_streq(buf, "disarm"))
		arg = NVDIMM_FWA_DISARM;
	else
		return -EINVAL;

	nvdimm_bus_lock(dev);
	rc = nvdimm->fw_ops->arm(nvdimm, arg);
	nvdimm_bus_unlock(dev);

	if (rc < 0)
		return rc;
	return len;
}
static DEVICE_ATTR_ADMIN_RW(activate);

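/*
 * Published via the "firmware" attribute group below, i.e. as
 * firmware/activate and firmware/result in the device's sysfs
 * directory.
 */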
static struct attribute *nvdimm_firmware_attributes[] = {
	&dev_attr_activate.attr,
	&dev_attr_result.attr,
	NULL,
};

static umode_t nvdimm_firmware_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_capability cap;

	if (!nd_desc->fw_ops)
		return 0;
	if (!nvdimm->fw_ops)
		return 0;

	nvdimm_bus_lock(dev);
	cap = nd_desc->fw_ops->capability(nd_desc);
	nvdimm_bus_unlock(dev);

	if (cap < NVDIMM_FWA_CAP_QUIESCE)
		return 0;

	return a->mode;
}

static const struct attribute_group nvdimm_firmware_attribute_group = {
	.name = "firmware",
	.attrs = nvdimm_firmware_attributes,
	.is_visible = nvdimm_firmware_visible,
};

static const struct attribute_group *nvdimm_attribute_groups[] = {
	&nd_device_attribute_group,
	&nvdimm_attribute_group,
	&nvdimm_firmware_attribute_group,
	NULL,
};

static const struct device_type nvdimm_device_type = {
	.name = "nvdimm",
	.release = nvdimm_release,
	.groups = nvdimm_attribute_groups,
};

bool is_nvdimm(struct device *dev)
{
	return dev->type == &nvdimm_device_type;
}

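/*
 * Allocate, initialize, and register an "nmem%d" device on @nvdimm_bus.
 * Returns NULL if allocation or id assignment fails; once registered,
 * the nvdimm is freed by nvdimm_release() when its device reference is
 * dropped.
 */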
struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
		void *provider_data, const struct attribute_group **groups,
		unsigned long flags, unsigned long cmd_mask, int num_flush,
		struct resource *flush_wpq, const char *dimm_id,
		const struct nvdimm_security_ops *sec_ops,
		const struct nvdimm_fw_ops *fw_ops)
{
	struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
	struct device *dev;

	if (!nvdimm)
		return NULL;

	nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
	if (nvdimm->id < 0) {
		kfree(nvdimm);
		return NULL;
	}

	nvdimm->dimm_id = dimm_id;
	nvdimm->provider_data = provider_data;
	if (noblk)
		flags |= 1 << NDD_NOBLK;
	nvdimm->flags = flags;
	nvdimm->cmd_mask = cmd_mask;
	nvdimm->num_flush = num_flush;
	nvdimm->flush_wpq = flush_wpq;
	atomic_set(&nvdimm->busy, 0);
	dev = &nvdimm->dev;
	dev_set_name(dev, "nmem%d", nvdimm->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = &nvdimm_device_type;
	dev->devt = MKDEV(nvdimm_major, nvdimm->id);
	dev->groups = groups;
	nvdimm->sec.ops = sec_ops;
	nvdimm->fw_ops = fw_ops;
	nvdimm->sec.overwrite_tmo = 0;
	INIT_DELAYED_WORK(&nvdimm->dwork, nvdimm_security_overwrite_query);
	/*
	 * Security state must be initialized before device_add() for
	 * attribute visibility, so fetch both the user and the extended
	 * (master) state up front.
	 */
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
	nd_device_register(dev);

	return nvdimm;
}
EXPORT_SYMBOL_GPL(__nvdimm_create);

static void shutdown_security_notify(void *data)
{
	struct nvdimm *nvdimm = data;

	sysfs_put(nvdimm->sec.overwrite_state);
}

int nvdimm_security_setup_events(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!nvdimm->sec.flags || !nvdimm->sec.ops
			|| !nvdimm->sec.ops->overwrite)
		return 0;
	nvdimm->sec.overwrite_state = sysfs_get_dirent(dev->kobj.sd, "security");
	if (!nvdimm->sec.overwrite_state)
		return -ENOMEM;

	return devm_add_action_or_reset(dev, shutdown_security_notify, nvdimm);
}
EXPORT_SYMBOL_GPL(nvdimm_security_setup_events);

int nvdimm_in_overwrite(struct nvdimm *nvdimm)
{
	return test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
}
EXPORT_SYMBOL_GPL(nvdimm_in_overwrite);

int nvdimm_security_freeze(struct nvdimm *nvdimm)
{
	int rc;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->freeze)
		return -EOPNOTSUPP;

	if (!nvdimm->sec.flags)
		return -EIO;

	if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
		dev_warn(&nvdimm->dev, "Overwrite operation in progress.\n");
		return -EBUSY;
	}

	rc = nvdimm->sec.ops->freeze(nvdimm);
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);

	return rc;
}

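/*
 * Per-DIMM allocation granularity: the region alignment divided evenly
 * across the mappings of the interleave set.
 */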
static unsigned long dpa_align(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;

	if (dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev),
				"bus lock required for capacity provision\n"))
		return 0;
	if (dev_WARN_ONCE(dev, !nd_region->ndr_mappings || nd_region->align
				% nd_region->ndr_mappings,
				"invalid region align %#lx mappings: %d\n",
				nd_region->align, nd_region->ndr_mappings))
		return 0;
	return nd_region->align / nd_region->ndr_mappings;
}

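/*
 * device_for_each_child() callback, see nd_blk_available_dpa(): for a
 * PMEM region that shares a DIMM with info->nd_mapping, account how far
 * aliased PMEM allocations push up the lowest DPA available for BLK.
 */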
int alias_dpa_busy(struct device *dev, void *data)
{
	resource_size_t map_end, blk_start, new;
	struct blk_alloc_info *info = data;
	struct nd_mapping *nd_mapping;
	struct nd_region *nd_region;
	struct nvdimm_drvdata *ndd;
	struct resource *res;
	unsigned long align;
	int i;

	if (!is_memory(dev))
		return 0;

	nd_region = to_nd_region(dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
			break;
	}

	if (i >= nd_region->ndr_mappings)
		return 0;

	ndd = to_ndd(nd_mapping);
	map_end = nd_mapping->start + nd_mapping->size - 1;
	blk_start = nd_mapping->start;

	/*
	 * In the allocation case ->res is set to free space that we are
	 * looking to validate against PMEM aliasing collision rules
	 * (i.e. BLK is allocated after all aliased PMEM).
	 */
	if (info->res) {
		if (info->res->start >= nd_mapping->start
				&& info->res->start < map_end)
			/* pass */;
		else
			return 0;
	}

 retry:
	/*
	 * Find the free dpa from the end of the last pmem allocation to
	 * the end of the interleave-set mapping.
	 */
	align = dpa_align(nd_region);
	if (!align)
		return 0;

	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end;

		if (strncmp(res->name, "pmem", 4) != 0)
			continue;

		start = ALIGN_DOWN(res->start, align);
		end = ALIGN(res->end + 1, align) - 1;
		if ((start >= blk_start && start < map_end)
				|| (end >= blk_start && end <= map_end)) {
			new = max(blk_start, min(map_end, end) + 1);
			if (new != blk_start) {
				blk_start = new;
				goto retry;
			}
		}
	}

	/* update the free space range with the probed blk_start */
	if (info->res && blk_start > info->res->start) {
		info->res->start = max(info->res->start, blk_start);
		if (info->res->start > info->res->end)
			info->res->end = info->res->start - 1;
		return 1;
	}

	info->available -= blk_start - nd_mapping->start;

	return 0;
}

/**
 * nd_blk_available_dpa - account the unused dpa of BLK region
 * @nd_region: constrain available space check to this reference region
 *
 * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges, but
 * we arrange for them to never start at a lower dpa than the last
 * PMEM allocation in an aliased region.
 */
resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct blk_alloc_info info = {
		.nd_mapping = nd_mapping,
		.available = nd_mapping->size,
		.res = NULL,
	};
	struct resource *res;
	unsigned long align;

	if (!ndd)
		return 0;

	device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);

	/* now account for busy blk allocations in unaliased dpa */
	align = dpa_align(nd_region);
	if (!align)
		return 0;
	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end, size;

		if (strncmp(res->name, "blk", 3) != 0)
			continue;
		start = ALIGN_DOWN(res->start, align);
		end = ALIGN(res->end + 1, align) - 1;
		size = end - start + 1;
		if (size >= info.available)
			return 0;
		info.available -= size;
	}

	return info.available;
}

/**
 * nd_pmem_max_contiguous_dpa - For the given dimm+region, return the max
 *			   contiguous unallocated dpa range.
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 */
resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
					   struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nvdimm_bus *nvdimm_bus;
	resource_size_t max = 0;
	struct resource *res;
	unsigned long align;

	/* if a dimm is disabled the available capacity is zero */
	if (!ndd)
		return 0;

	align = dpa_align(nd_region);
	if (!align)
		return 0;

	nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	if (__reserve_free_pmem(&nd_region->dev, nd_mapping->nvdimm))
		return 0;
	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end;

		if (strcmp(res->name, "pmem-reserve") != 0)
			continue;
		/* trim free space relative to current alignment setting */
		start = ALIGN(res->start, align);
		end = ALIGN_DOWN(res->end + 1, align) - 1;
		if (end < start)
			continue;
		if (end - start + 1 > max)
			max = end - start + 1;
	}
	release_free_pmem(nvdimm_bus, nd_mapping);
	return max;
}

/**
 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
 * @nd_mapping: container of dpa-resource-root + labels
 * @nd_region: constrain available space check to this reference region
 * @overlap: calculate available space assuming this level of overlap
 *
 * Validate that a PMEM label, if present, aligns with the start of an
 * interleave set and truncate the available size at the lowest BLK
 * overlap point.
 *
 * The expectation is that this routine is called multiple times as it
 * probes for the largest BLK encroachment for any single member DIMM of
 * the interleave set.  Once that value is determined the PMEM-limit for
 * the set can be established.
 */
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, resource_size_t *overlap)
{
	resource_size_t map_start, map_end, busy = 0, available, blk_start;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res;
	const char *reason;
	unsigned long align;

	if (!ndd)
		return 0;

	align = dpa_align(nd_region);
	if (!align)
		return 0;

	map_start = nd_mapping->start;
	map_end = map_start + nd_mapping->size - 1;
	blk_start = max(map_start, map_end + 1 - *overlap);
	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end;

		start = ALIGN_DOWN(res->start, align);
		end = ALIGN(res->end + 1, align) - 1;
		if (start >= map_start && start < map_end) {
			if (strncmp(res->name, "blk", 3) == 0)
				blk_start = min(blk_start,
						max(map_start, start));
			else if (end > map_end) {
				reason = "misaligned to iset";
				goto err;
			} else
				busy += end - start + 1;
		} else if (end >= map_start && end <= map_end) {
			if (strncmp(res->name, "blk", 3) == 0) {
				/*
				 * If a BLK allocation overlaps the start of
				 * PMEM the entire interleave set may now only
				 * be used for BLK.
				 */
				blk_start = map_start;
			} else
				busy += end - start + 1;
		} else if (map_start > start && map_start < end) {
			/* total eclipse of the mapping */
			busy += nd_mapping->size;
			blk_start = map_start;
		}
	}

	*overlap = map_end + 1 - blk_start;
	available = blk_start - map_start;
	if (busy < available)
		return ALIGN_DOWN(available - busy, align);
	return 0;

 err:
	nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason);
	return 0;
}

void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
{
	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	kfree(res->name);
	__release_region(&ndd->dpa, res->start, resource_size(res));
}

struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, resource_size_t start,
		resource_size_t n)
{
	char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL);
	struct resource *res;

	if (!name)
		return NULL;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	res = __request_region(&ndd->dpa, start, n, name, 0);
	if (!res)
		kfree(name);
	return res;
}

/**
 * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
 * @ndd: container of dpa-resource-root + labels
 * @label_id: dpa resource name of the form {pmem|blk}-<human readable uuid>
 */
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id)
{
	resource_size_t allocated = 0;
	struct resource *res;

	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id->id) == 0)
			allocated += resource_size(res);

	return allocated;
}

static int count_dimms(struct device *dev, void *c)
{
	int *count = c;

	if (is_nvdimm(dev))
		(*count)++;
	return 0;
}

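/*
 * Sanity check that every expected DIMM completed registration; bus
 * providers can call this to detect asynchronous registration failures.
 */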
int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
{
	int count = 0;

	/* Flush any possible dimm registration failures */
	nd_synchronize();

	device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
	dev_dbg(&nvdimm_bus->dev, "count: %d\n", count);
	if (count != dimm_count)
		return -ENXIO;
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);

void __exit nvdimm_devs_exit(void)
{
	ida_destroy(&dimm_ida);
}