1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * nvmem framework core.
4  *
5  * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
6  * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
7  */
8 
9 #include <linux/device.h>
10 #include <linux/export.h>
11 #include <linux/fs.h>
12 #include <linux/idr.h>
13 #include <linux/init.h>
14 #include <linux/kref.h>
15 #include <linux/module.h>
16 #include <linux/nvmem-consumer.h>
17 #include <linux/nvmem-provider.h>
18 #include <linux/gpio/consumer.h>
19 #include <linux/of.h>
20 #include <linux/slab.h>
21 
22 struct nvmem_device {
23 	struct module		*owner;
24 	struct device		dev;
25 	int			stride;
26 	int			word_size;
27 	int			id;
28 	struct kref		refcnt;
29 	size_t			size;
30 	bool			read_only;
31 	bool			root_only;
32 	int			flags;
33 	enum nvmem_type		type;
34 	struct bin_attribute	eeprom;
35 	struct device		*base_dev;
36 	struct list_head	cells;
37 	const struct nvmem_keepout *keepout;
38 	unsigned int		nkeepout;
39 	nvmem_reg_read_t	reg_read;
40 	nvmem_reg_write_t	reg_write;
41 	nvmem_cell_post_process_t cell_post_process;
42 	struct gpio_desc	*wp_gpio;
43 	void *priv;
44 };
45 
46 #define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)
47 
48 #define FLAG_COMPAT		BIT(0)
49 struct nvmem_cell_entry {
50 	const char		*name;
51 	int			offset;
52 	int			bytes;
53 	int			bit_offset;
54 	int			nbits;
55 	struct device_node	*np;
56 	struct nvmem_device	*nvmem;
57 	struct list_head	node;
58 };
59 
60 struct nvmem_cell {
61 	struct nvmem_cell_entry *entry;
62 	const char		*id;
63 };
64 
65 static DEFINE_MUTEX(nvmem_mutex);
66 static DEFINE_IDA(nvmem_ida);
67 
68 static DEFINE_MUTEX(nvmem_cell_mutex);
69 static LIST_HEAD(nvmem_cell_tables);
70 
71 static DEFINE_MUTEX(nvmem_lookup_mutex);
72 static LIST_HEAD(nvmem_lookup_list);
73 
74 static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);
75 
76 static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
77 			    void *val, size_t bytes)
78 {
79 	if (nvmem->reg_read)
80 		return nvmem->reg_read(nvmem->priv, offset, val, bytes);
81 
82 	return -EINVAL;
83 }
84 
85 static int __nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
86 			     void *val, size_t bytes)
87 {
88 	int ret;
89 
90 	if (nvmem->reg_write) {
91 		gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
92 		ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
93 		gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
94 		return ret;
95 	}
96 
97 	return -EINVAL;
98 }
99 
100 static int nvmem_access_with_keepouts(struct nvmem_device *nvmem,
101 				      unsigned int offset, void *val,
102 				      size_t bytes, int write)
103 {
104 
105 	unsigned int end = offset + bytes;
106 	unsigned int kend, ksize;
107 	const struct nvmem_keepout *keepout = nvmem->keepout;
108 	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
109 	int rc;
110 
111 	/*
112 	 * Skip all keepouts before the range being accessed.
113 	 * Keepouts are sorted.
114 	 */
115 	while ((keepout < keepoutend) && (keepout->end <= offset))
116 		keepout++;
117 
118 	while ((offset < end) && (keepout < keepoutend)) {
119 		/* Access the valid portion before the keepout. */
120 		if (offset < keepout->start) {
121 			kend = min(end, keepout->start);
122 			ksize = kend - offset;
123 			if (write)
124 				rc = __nvmem_reg_write(nvmem, offset, val, ksize);
125 			else
126 				rc = __nvmem_reg_read(nvmem, offset, val, ksize);
127 
128 			if (rc)
129 				return rc;
130 
131 			offset += ksize;
132 			val += ksize;
133 		}
134 
135 		/*
136 		 * Now we're aligned to the start of this keepout zone. Go
137 		 * through it.
138 		 */
139 		kend = min(end, keepout->end);
140 		ksize = kend - offset;
141 		if (!write)
142 			memset(val, keepout->value, ksize);
143 
144 		val += ksize;
145 		offset += ksize;
146 		keepout++;
147 	}
148 
149 	/*
150 	 * If we ran out of keepouts but there's still stuff to do, send it
151 	 * down directly
152 	 */
153 	if (offset < end) {
154 		ksize = end - offset;
155 		if (write)
156 			return __nvmem_reg_write(nvmem, offset, val, ksize);
157 		else
158 			return __nvmem_reg_read(nvmem, offset, val, ksize);
159 	}
160 
161 	return 0;
162 }
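
/*
 * Worked example (illustrative only): with a single keepout covering
 * bytes 4..7 (start = 4, end = 8, value = 0xff), a 12-byte read at
 * offset 0 is split by the loop above into:
 *
 *   __nvmem_reg_read(nvmem, 0, val, 4);       bytes 0..3 from the device
 *   memset(val + 4, 0xff, 4);                 bytes 4..7 filled, not read
 *   __nvmem_reg_read(nvmem, 8, val + 8, 4);   bytes 8..11 from the device
 *
 * A write takes the same path but simply skips the keepout range.
 */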
163 
164 static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
165 			  void *val, size_t bytes)
166 {
167 	if (!nvmem->nkeepout)
168 		return __nvmem_reg_read(nvmem, offset, val, bytes);
169 
170 	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, false);
171 }
172 
173 static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
174 			   void *val, size_t bytes)
175 {
176 	if (!nvmem->nkeepout)
177 		return __nvmem_reg_write(nvmem, offset, val, bytes);
178 
179 	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, true);
180 }
181 
182 #ifdef CONFIG_NVMEM_SYSFS
183 static const char * const nvmem_type_str[] = {
184 	[NVMEM_TYPE_UNKNOWN] = "Unknown",
185 	[NVMEM_TYPE_EEPROM] = "EEPROM",
186 	[NVMEM_TYPE_OTP] = "OTP",
187 	[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
188 	[NVMEM_TYPE_FRAM] = "FRAM",
189 };
190 
191 #ifdef CONFIG_DEBUG_LOCK_ALLOC
192 static struct lock_class_key eeprom_lock_key;
193 #endif
194 
195 static ssize_t type_show(struct device *dev,
196 			 struct device_attribute *attr, char *buf)
197 {
198 	struct nvmem_device *nvmem = to_nvmem_device(dev);
199 
200 	return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
201 }
202 
203 static DEVICE_ATTR_RO(type);
204 
205 static struct attribute *nvmem_attrs[] = {
206 	&dev_attr_type.attr,
207 	NULL,
208 };
209 
210 static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
211 				   struct bin_attribute *attr, char *buf,
212 				   loff_t pos, size_t count)
213 {
214 	struct device *dev;
215 	struct nvmem_device *nvmem;
216 	int rc;
217 
218 	if (attr->private)
219 		dev = attr->private;
220 	else
221 		dev = kobj_to_dev(kobj);
222 	nvmem = to_nvmem_device(dev);
223 
224 	/* Stop the user from reading */
225 	if (pos >= nvmem->size)
226 		return 0;
227 
228 	if (!IS_ALIGNED(pos, nvmem->stride))
229 		return -EINVAL;
230 
231 	if (count < nvmem->word_size)
232 		return -EINVAL;
233 
234 	if (pos + count > nvmem->size)
235 		count = nvmem->size - pos;
236 
237 	count = round_down(count, nvmem->word_size);
238 
239 	if (!nvmem->reg_read)
240 		return -EPERM;
241 
242 	rc = nvmem_reg_read(nvmem, pos, buf, count);
243 
244 	if (rc)
245 		return rc;
246 
247 	return count;
248 }
249 
250 static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
251 				    struct bin_attribute *attr, char *buf,
252 				    loff_t pos, size_t count)
253 {
254 	struct device *dev;
255 	struct nvmem_device *nvmem;
256 	int rc;
257 
258 	if (attr->private)
259 		dev = attr->private;
260 	else
261 		dev = kobj_to_dev(kobj);
262 	nvmem = to_nvmem_device(dev);
263 
264 	/* Stop the user from writing */
265 	if (pos >= nvmem->size)
266 		return -EFBIG;
267 
268 	if (!IS_ALIGNED(pos, nvmem->stride))
269 		return -EINVAL;
270 
271 	if (count < nvmem->word_size)
272 		return -EINVAL;
273 
274 	if (pos + count > nvmem->size)
275 		count = nvmem->size - pos;
276 
277 	count = round_down(count, nvmem->word_size);
278 
279 	if (!nvmem->reg_write)
280 		return -EPERM;
281 
282 	rc = nvmem_reg_write(nvmem, pos, buf, count);
283 
284 	if (rc)
285 		return rc;
286 
287 	return count;
288 }
289 
290 static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
291 {
292 	umode_t mode = 0400;
293 
294 	if (!nvmem->root_only)
295 		mode |= 0044;
296 
297 	if (!nvmem->read_only)
298 		mode |= 0200;
299 
300 	if (!nvmem->reg_write)
301 		mode &= ~0200;
302 
303 	if (!nvmem->reg_read)
304 		mode &= ~0444;
305 
306 	return mode;
307 }
308 
309 static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
310 					 struct bin_attribute *attr, int i)
311 {
312 	struct device *dev = kobj_to_dev(kobj);
313 	struct nvmem_device *nvmem = to_nvmem_device(dev);
314 
315 	attr->size = nvmem->size;
316 
317 	return nvmem_bin_attr_get_umode(nvmem);
318 }
319 
320 /* default read/write permissions */
321 static struct bin_attribute bin_attr_rw_nvmem = {
322 	.attr	= {
323 		.name	= "nvmem",
324 		.mode	= 0644,
325 	},
326 	.read	= bin_attr_nvmem_read,
327 	.write	= bin_attr_nvmem_write,
328 };
329 
330 static struct bin_attribute *nvmem_bin_attributes[] = {
331 	&bin_attr_rw_nvmem,
332 	NULL,
333 };
334 
335 static const struct attribute_group nvmem_bin_group = {
336 	.bin_attrs	= nvmem_bin_attributes,
337 	.attrs		= nvmem_attrs,
338 	.is_bin_visible = nvmem_bin_attr_is_visible,
339 };
340 
341 static const struct attribute_group *nvmem_dev_groups[] = {
342 	&nvmem_bin_group,
343 	NULL,
344 };
345 
346 static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
347 	.attr	= {
348 		.name	= "eeprom",
349 	},
350 	.read	= bin_attr_nvmem_read,
351 	.write	= bin_attr_nvmem_write,
352 };
353 
354 /*
355  * nvmem_sysfs_setup_compat() - Create an additional binary entry in
356  * the driver's sysfs directory, to be backwards compatible with the older
357  * drivers/misc/eeprom drivers.
358  */
359 static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
360 				    const struct nvmem_config *config)
361 {
362 	int rval;
363 
364 	if (!config->compat)
365 		return 0;
366 
367 	if (!config->base_dev)
368 		return -EINVAL;
369 
370 	if (config->type == NVMEM_TYPE_FRAM)
371 		bin_attr_nvmem_eeprom_compat.attr.name = "fram";
372 
373 	nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
374 	nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
375 	nvmem->eeprom.size = nvmem->size;
376 #ifdef CONFIG_DEBUG_LOCK_ALLOC
377 	nvmem->eeprom.attr.key = &eeprom_lock_key;
378 #endif
379 	nvmem->eeprom.private = &nvmem->dev;
380 	nvmem->base_dev = config->base_dev;
381 
382 	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
383 	if (rval) {
384 		dev_err(&nvmem->dev,
385 			"Failed to create eeprom binary file %d\n", rval);
386 		return rval;
387 	}
388 
389 	nvmem->flags |= FLAG_COMPAT;
390 
391 	return 0;
392 }
393 
394 static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
395 			      const struct nvmem_config *config)
396 {
397 	if (config->compat)
398 		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
399 }
400 
401 #else /* CONFIG_NVMEM_SYSFS */
402 
403 static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
404 				    const struct nvmem_config *config)
405 {
406 	return -ENOSYS;
407 }
408 static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
409 				      const struct nvmem_config *config)
410 {
411 }
412 
413 #endif /* CONFIG_NVMEM_SYSFS */
414 
415 static void nvmem_release(struct device *dev)
416 {
417 	struct nvmem_device *nvmem = to_nvmem_device(dev);
418 
419 	ida_free(&nvmem_ida, nvmem->id);
420 	gpiod_put(nvmem->wp_gpio);
421 	kfree(nvmem);
422 }
423 
424 static const struct device_type nvmem_provider_type = {
425 	.release	= nvmem_release,
426 };
427 
428 static struct bus_type nvmem_bus_type = {
429 	.name		= "nvmem",
430 };
431 
432 static void nvmem_cell_entry_drop(struct nvmem_cell_entry *cell)
433 {
434 	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
435 	mutex_lock(&nvmem_mutex);
436 	list_del(&cell->node);
437 	mutex_unlock(&nvmem_mutex);
438 	of_node_put(cell->np);
439 	kfree_const(cell->name);
440 	kfree(cell);
441 }
442 
443 static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
444 {
445 	struct nvmem_cell_entry *cell, *p;
446 
447 	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
448 		nvmem_cell_entry_drop(cell);
449 }
450 
451 static void nvmem_cell_entry_add(struct nvmem_cell_entry *cell)
452 {
453 	mutex_lock(&nvmem_mutex);
454 	list_add_tail(&cell->node, &cell->nvmem->cells);
455 	mutex_unlock(&nvmem_mutex);
456 	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
457 }
458 
459 static int nvmem_cell_info_to_nvmem_cell_entry_nodup(struct nvmem_device *nvmem,
460 						     const struct nvmem_cell_info *info,
461 						     struct nvmem_cell_entry *cell)
462 {
463 	cell->nvmem = nvmem;
464 	cell->offset = info->offset;
465 	cell->bytes = info->bytes;
466 	cell->name = info->name;
467 
468 	cell->bit_offset = info->bit_offset;
469 	cell->nbits = info->nbits;
470 	cell->np = info->np;
471 
472 	if (cell->nbits)
473 		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
474 					   BITS_PER_BYTE);
475 
476 	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
477 		dev_err(&nvmem->dev,
478 			"cell %s unaligned to nvmem stride %d\n",
479 			cell->name ?: "<unknown>", nvmem->stride);
480 		return -EINVAL;
481 	}
482 
483 	return 0;
484 }
485 
486 static int nvmem_cell_info_to_nvmem_cell_entry(struct nvmem_device *nvmem,
487 					       const struct nvmem_cell_info *info,
488 					       struct nvmem_cell_entry *cell)
489 {
490 	int err;
491 
492 	err = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, cell);
493 	if (err)
494 		return err;
495 
496 	cell->name = kstrdup_const(info->name, GFP_KERNEL);
497 	if (!cell->name)
498 		return -ENOMEM;
499 
500 	return 0;
501 }
502 
503 /**
504  * nvmem_add_cells() - Add cell information to an nvmem device
505  *
506  * @nvmem: nvmem device to add cells to.
507  * @info: nvmem cell info to add to the device
508  * @ncells: number of cells in info
509  *
510  * Return: 0 or negative error code on failure.
511  */
512 static int nvmem_add_cells(struct nvmem_device *nvmem,
513 		    const struct nvmem_cell_info *info,
514 		    int ncells)
515 {
516 	struct nvmem_cell_entry **cells;
517 	int i, rval;
518 
519 	cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
520 	if (!cells)
521 		return -ENOMEM;
522 
523 	for (i = 0; i < ncells; i++) {
524 		cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
525 		if (!cells[i]) {
526 			rval = -ENOMEM;
527 			goto err;
528 		}
529 
530 		rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, &info[i], cells[i]);
531 		if (rval) {
532 			kfree(cells[i]);
533 			goto err;
534 		}
535 
536 		nvmem_cell_entry_add(cells[i]);
537 	}
538 
539 	/* remove tmp array */
540 	kfree(cells);
541 
542 	return 0;
543 err:
544 	while (i--)
545 		nvmem_cell_entry_drop(cells[i]);
546 
547 	kfree(cells);
548 
549 	return rval;
550 }
551 
552 /**
553  * nvmem_register_notifier() - Register a notifier block for nvmem events.
554  *
555  * @nb: notifier block to be called on nvmem events.
556  *
557  * Return: 0 on success, negative error number on failure.
558  */
559 int nvmem_register_notifier(struct notifier_block *nb)
560 {
561 	return blocking_notifier_chain_register(&nvmem_notifier, nb);
562 }
563 EXPORT_SYMBOL_GPL(nvmem_register_notifier);
564 
565 /**
566  * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
567  *
568  * @nb: notifier block to be unregistered.
569  *
570  * Return: 0 on success, negative error number on failure.
571  */
572 int nvmem_unregister_notifier(struct notifier_block *nb)
573 {
574 	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
575 }
576 EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);
577 
578 static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
579 {
580 	const struct nvmem_cell_info *info;
581 	struct nvmem_cell_table *table;
582 	struct nvmem_cell_entry *cell;
583 	int rval = 0, i;
584 
585 	mutex_lock(&nvmem_cell_mutex);
586 	list_for_each_entry(table, &nvmem_cell_tables, node) {
587 		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
588 			for (i = 0; i < table->ncells; i++) {
589 				info = &table->cells[i];
590 
591 				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
592 				if (!cell) {
593 					rval = -ENOMEM;
594 					goto out;
595 				}
596 
597 				rval = nvmem_cell_info_to_nvmem_cell_entry(nvmem, info, cell);
598 				if (rval) {
599 					kfree(cell);
600 					goto out;
601 				}
602 
603 				nvmem_cell_entry_add(cell);
604 			}
605 		}
606 	}
607 
608 out:
609 	mutex_unlock(&nvmem_cell_mutex);
610 	return rval;
611 }
612 
613 static struct nvmem_cell_entry *
614 nvmem_find_cell_entry_by_name(struct nvmem_device *nvmem, const char *cell_id)
615 {
616 	struct nvmem_cell_entry *iter, *cell = NULL;
617 
618 	mutex_lock(&nvmem_mutex);
619 	list_for_each_entry(iter, &nvmem->cells, node) {
620 		if (strcmp(cell_id, iter->name) == 0) {
621 			cell = iter;
622 			break;
623 		}
624 	}
625 	mutex_unlock(&nvmem_mutex);
626 
627 	return cell;
628 }
629 
630 static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
631 {
632 	unsigned int cur = 0;
633 	const struct nvmem_keepout *keepout = nvmem->keepout;
634 	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
635 
636 	while (keepout < keepoutend) {
637 		/* Ensure keepouts are sorted and don't overlap. */
638 		if (keepout->start < cur) {
639 			dev_err(&nvmem->dev,
640 				"Keepout regions aren't sorted or overlap.\n");
641 
642 			return -ERANGE;
643 		}
644 
645 		if (keepout->end < keepout->start) {
646 			dev_err(&nvmem->dev,
647 				"Invalid keepout region.\n");
648 
649 			return -EINVAL;
650 		}
651 
652 		/*
653 		 * Validate keepouts (and holes between) don't violate
654 		 * word_size constraints.
655 		 */
656 		if ((keepout->end - keepout->start < nvmem->word_size) ||
657 		    ((keepout->start != cur) &&
658 		     (keepout->start - cur < nvmem->word_size))) {
659 
660 			dev_err(&nvmem->dev,
661 				"Keepout regions violate word_size constraints.\n");
662 
663 			return -ERANGE;
664 		}
665 
666 		/* Validate keepouts don't violate stride (alignment). */
667 		if (!IS_ALIGNED(keepout->start, nvmem->stride) ||
668 		    !IS_ALIGNED(keepout->end, nvmem->stride)) {
669 
670 			dev_err(&nvmem->dev,
671 				"Keepout regions violate stride.\n");
672 
673 			return -EINVAL;
674 		}
675 
676 		cur = keepout->end;
677 		keepout++;
678 	}
679 
680 	return 0;
681 }
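
/*
 * A minimal provider-side sketch (hypothetical "foo" names, not part of
 * this file) of a keepout table that satisfies the checks above for a
 * device with word_size = 4 and stride = 4: the regions are sorted, do
 * not overlap, and both the regions and the holes between them are at
 * least word_size bytes and stride aligned.
 *
 *   static const struct nvmem_keepout foo_keepouts[] = {
 *           { .start = 0x10, .end = 0x14, .value = 0x00 },
 *           { .start = 0x20, .end = 0x28, .value = 0xff },
 *   };
 *
 *   config.keepout = foo_keepouts;
 *   config.nkeepout = ARRAY_SIZE(foo_keepouts);
 */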
682 
683 static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
684 {
685 	struct device_node *parent, *child;
686 	struct device *dev = &nvmem->dev;
687 	struct nvmem_cell_entry *cell;
688 	const __be32 *addr;
689 	int len;
690 
691 	parent = dev->of_node;
692 
693 	for_each_child_of_node(parent, child) {
694 		addr = of_get_property(child, "reg", &len);
695 		if (!addr)
696 			continue;
697 		if (len < 2 * sizeof(u32)) {
698 			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
699 			of_node_put(child);
700 			return -EINVAL;
701 		}
702 
703 		cell = kzalloc(sizeof(*cell), GFP_KERNEL);
704 		if (!cell) {
705 			of_node_put(child);
706 			return -ENOMEM;
707 		}
708 
709 		cell->nvmem = nvmem;
710 		cell->offset = be32_to_cpup(addr++);
711 		cell->bytes = be32_to_cpup(addr);
712 		cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);
713 
714 		addr = of_get_property(child, "bits", &len);
715 		if (addr && len == (2 * sizeof(u32))) {
716 			cell->bit_offset = be32_to_cpup(addr++);
717 			cell->nbits = be32_to_cpup(addr);
718 		}
719 
720 		if (cell->nbits)
721 			cell->bytes = DIV_ROUND_UP(
722 					cell->nbits + cell->bit_offset,
723 					BITS_PER_BYTE);
724 
725 		if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
726 			dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
727 				cell->name, nvmem->stride);
728 			/* Cells already added will be freed later. */
729 			kfree_const(cell->name);
730 			kfree(cell);
731 			of_node_put(child);
732 			return -EINVAL;
733 		}
734 
735 		cell->np = of_node_get(child);
736 		nvmem_cell_entry_add(cell);
737 	}
738 
739 	return 0;
740 }
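
/*
 * The loop above walks the child nodes of the provider's device tree
 * node.  A hypothetical devicetree snippet in the layout it expects
 * (cell name from the node name, offset and length from "reg", an
 * optional sub-byte field from "bits" as <bit_offset nbits>):
 *
 *   eeprom@52 {
 *           compatible = "atmel,24c32";
 *           reg = <0x52>;
 *
 *           mac-address@90 {
 *                   reg = <0x90 0x6>;
 *           };
 *
 *           calib@a0 {
 *                   reg = <0xa0 0x2>;
 *                   bits = <2 10>;
 *           };
 *   };
 */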
741 
742 /**
743  * nvmem_register() - Register a nvmem device for given nvmem_config.
744  * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
745  *
746  * @config: nvmem device configuration with which nvmem device is created.
747  *
748  * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
749  * on success.
750  */
751 
752 struct nvmem_device *nvmem_register(const struct nvmem_config *config)
753 {
754 	struct nvmem_device *nvmem;
755 	int rval;
756 
757 	if (!config->dev)
758 		return ERR_PTR(-EINVAL);
759 
760 	if (!config->reg_read && !config->reg_write)
761 		return ERR_PTR(-EINVAL);
762 
763 	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
764 	if (!nvmem)
765 		return ERR_PTR(-ENOMEM);
766 
767 	rval  = ida_alloc(&nvmem_ida, GFP_KERNEL);
768 	if (rval < 0) {
769 		kfree(nvmem);
770 		return ERR_PTR(rval);
771 	}
772 
773 	nvmem->id = rval;
774 
775 	nvmem->dev.type = &nvmem_provider_type;
776 	nvmem->dev.bus = &nvmem_bus_type;
777 	nvmem->dev.parent = config->dev;
778 
779 	device_initialize(&nvmem->dev);
780 
781 	if (!config->ignore_wp)
782 		nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
783 						    GPIOD_OUT_HIGH);
784 	if (IS_ERR(nvmem->wp_gpio)) {
785 		rval = PTR_ERR(nvmem->wp_gpio);
786 		nvmem->wp_gpio = NULL;
787 		goto err_put_device;
788 	}
789 
790 	kref_init(&nvmem->refcnt);
791 	INIT_LIST_HEAD(&nvmem->cells);
792 
793 	nvmem->owner = config->owner;
794 	if (!nvmem->owner && config->dev->driver)
795 		nvmem->owner = config->dev->driver->owner;
796 	nvmem->stride = config->stride ?: 1;
797 	nvmem->word_size = config->word_size ?: 1;
798 	nvmem->size = config->size;
799 	nvmem->root_only = config->root_only;
800 	nvmem->priv = config->priv;
801 	nvmem->type = config->type;
802 	nvmem->reg_read = config->reg_read;
803 	nvmem->reg_write = config->reg_write;
804 	nvmem->cell_post_process = config->cell_post_process;
805 	nvmem->keepout = config->keepout;
806 	nvmem->nkeepout = config->nkeepout;
807 	if (config->of_node)
808 		nvmem->dev.of_node = config->of_node;
809 	else if (!config->no_of_node)
810 		nvmem->dev.of_node = config->dev->of_node;
811 
812 	switch (config->id) {
813 	case NVMEM_DEVID_NONE:
814 		rval = dev_set_name(&nvmem->dev, "%s", config->name);
815 		break;
816 	case NVMEM_DEVID_AUTO:
817 		rval = dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
818 		break;
819 	default:
820 		rval = dev_set_name(&nvmem->dev, "%s%d",
821 			     config->name ? : "nvmem",
822 			     config->name ? config->id : nvmem->id);
823 		break;
824 	}
825 
826 	if (rval)
827 		goto err_put_device;
828 
829 	nvmem->read_only = device_property_present(config->dev, "read-only") ||
830 			   config->read_only || !nvmem->reg_write;
831 
832 #ifdef CONFIG_NVMEM_SYSFS
833 	nvmem->dev.groups = nvmem_dev_groups;
834 #endif
835 
836 	if (nvmem->nkeepout) {
837 		rval = nvmem_validate_keepouts(nvmem);
838 		if (rval)
839 			goto err_put_device;
840 	}
841 
842 	if (config->compat) {
843 		rval = nvmem_sysfs_setup_compat(nvmem, config);
844 		if (rval)
845 			goto err_put_device;
846 	}
847 
848 	if (config->cells) {
849 		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
850 		if (rval)
851 			goto err_remove_cells;
852 	}
853 
854 	rval = nvmem_add_cells_from_table(nvmem);
855 	if (rval)
856 		goto err_remove_cells;
857 
858 	rval = nvmem_add_cells_from_of(nvmem);
859 	if (rval)
860 		goto err_remove_cells;
861 
862 	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);
863 
864 	rval = device_add(&nvmem->dev);
865 	if (rval)
866 		goto err_remove_cells;
867 
868 	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);
869 
870 	return nvmem;
871 
872 err_remove_cells:
873 	nvmem_device_remove_all_cells(nvmem);
874 	if (config->compat)
875 		nvmem_sysfs_remove_compat(nvmem, config);
876 err_put_device:
877 	put_device(&nvmem->dev);
878 
879 	return ERR_PTR(rval);
880 }
881 EXPORT_SYMBOL_GPL(nvmem_register);
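
/*
 * A minimal provider registration sketch (hypothetical "foo" driver,
 * not part of this file).  The driver fills a struct nvmem_config and
 * registers it from probe; the devm_ variant below is usually
 * preferred so the device is unregistered on driver detach.
 *
 *   static int foo_reg_read(void *priv, unsigned int offset, void *val,
 *                           size_t bytes)
 *   {
 *           struct foo_priv *foo = priv;
 *
 *           return regmap_bulk_read(foo->regmap, offset, val, bytes);
 *   }
 *
 *   struct nvmem_config config = {
 *           .dev = &pdev->dev,
 *           .name = "foo-efuse",
 *           .id = NVMEM_DEVID_AUTO,
 *           .type = NVMEM_TYPE_OTP,
 *           .read_only = true,
 *           .word_size = 1,
 *           .stride = 1,
 *           .size = 256,
 *           .reg_read = foo_reg_read,
 *           .priv = foo,
 *   };
 *
 *   nvmem = devm_nvmem_register(&pdev->dev, &config);
 *   if (IS_ERR(nvmem))
 *           return PTR_ERR(nvmem);
 */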
882 
883 static void nvmem_device_release(struct kref *kref)
884 {
885 	struct nvmem_device *nvmem;
886 
887 	nvmem = container_of(kref, struct nvmem_device, refcnt);
888 
889 	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);
890 
891 	if (nvmem->flags & FLAG_COMPAT)
892 		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
893 
894 	nvmem_device_remove_all_cells(nvmem);
895 	device_unregister(&nvmem->dev);
896 }
897 
898 /**
899  * nvmem_unregister() - Unregister previously registered nvmem device
900  *
901  * @nvmem: Pointer to previously registered nvmem device.
902  */
903 void nvmem_unregister(struct nvmem_device *nvmem)
904 {
905 	if (nvmem)
906 		kref_put(&nvmem->refcnt, nvmem_device_release);
907 }
908 EXPORT_SYMBOL_GPL(nvmem_unregister);
909 
910 static void devm_nvmem_unregister(void *nvmem)
911 {
912 	nvmem_unregister(nvmem);
913 }
914 
915 /**
916  * devm_nvmem_register() - Register a managed nvmem device for given
917  * nvmem_config.
918  * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
919  *
920  * @dev: Device that uses the nvmem device.
921  * @config: nvmem device configuration with which nvmem device is created.
922  *
923  * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
924  * on success.
925  */
926 struct nvmem_device *devm_nvmem_register(struct device *dev,
927 					 const struct nvmem_config *config)
928 {
929 	struct nvmem_device *nvmem;
930 	int ret;
931 
932 	nvmem = nvmem_register(config);
933 	if (IS_ERR(nvmem))
934 		return nvmem;
935 
936 	ret = devm_add_action_or_reset(dev, devm_nvmem_unregister, nvmem);
937 	if (ret)
938 		return ERR_PTR(ret);
939 
940 	return nvmem;
941 }
942 EXPORT_SYMBOL_GPL(devm_nvmem_register);
943 
944 static struct nvmem_device *__nvmem_device_get(void *data,
945 			int (*match)(struct device *dev, const void *data))
946 {
947 	struct nvmem_device *nvmem = NULL;
948 	struct device *dev;
949 
950 	mutex_lock(&nvmem_mutex);
951 	dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
952 	if (dev)
953 		nvmem = to_nvmem_device(dev);
954 	mutex_unlock(&nvmem_mutex);
955 	if (!nvmem)
956 		return ERR_PTR(-EPROBE_DEFER);
957 
958 	if (!try_module_get(nvmem->owner)) {
959 		dev_err(&nvmem->dev,
960 			"could not increase module refcount for cell %s\n",
961 			nvmem_dev_name(nvmem));
962 
963 		put_device(&nvmem->dev);
964 		return ERR_PTR(-EINVAL);
965 	}
966 
967 	kref_get(&nvmem->refcnt);
968 
969 	return nvmem;
970 }
971 
972 static void __nvmem_device_put(struct nvmem_device *nvmem)
973 {
974 	put_device(&nvmem->dev);
975 	module_put(nvmem->owner);
976 	kref_put(&nvmem->refcnt, nvmem_device_release);
977 }
978 
979 #if IS_ENABLED(CONFIG_OF)
980 /**
981  * of_nvmem_device_get() - Get nvmem device from a given id
982  *
983  * @np: Device tree node that uses the nvmem device.
984  * @id: nvmem name from nvmem-names property.
985  *
986  * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
987  * on success.
988  */
989 struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
990 {
991 
992 	struct device_node *nvmem_np;
993 	struct nvmem_device *nvmem;
994 	int index = 0;
995 
996 	if (id)
997 		index = of_property_match_string(np, "nvmem-names", id);
998 
999 	nvmem_np = of_parse_phandle(np, "nvmem", index);
1000 	if (!nvmem_np)
1001 		return ERR_PTR(-ENOENT);
1002 
1003 	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
1004 	of_node_put(nvmem_np);
1005 	return nvmem;
1006 }
1007 EXPORT_SYMBOL_GPL(of_nvmem_device_get);
1008 #endif
1009 
1010 /**
1011  * nvmem_device_get() - Get nvmem device from a given id
1012  *
1013  * @dev: Device that uses the nvmem device.
1014  * @dev_name: name of the requested nvmem device.
1015  *
1016  * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
1017  * on success.
1018  */
1019 struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
1020 {
1021 	if (dev->of_node) { /* try dt first */
1022 		struct nvmem_device *nvmem;
1023 
1024 		nvmem = of_nvmem_device_get(dev->of_node, dev_name);
1025 
1026 		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
1027 			return nvmem;
1028 
1029 	}
1030 
1031 	return __nvmem_device_get((void *)dev_name, device_match_name);
1032 }
1033 EXPORT_SYMBOL_GPL(nvmem_device_get);
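
/*
 * Device-level consumer sketch (hypothetical names): grab the whole
 * nvmem device by name, read a raw byte range and drop the reference.
 *
 *   struct nvmem_device *nvmem;
 *   u8 serial[16];
 *   int ret;
 *
 *   nvmem = nvmem_device_get(dev, "foo-efuse0");
 *   if (IS_ERR(nvmem))
 *           return PTR_ERR(nvmem);
 *
 *   ret = nvmem_device_read(nvmem, 0x40, sizeof(serial), serial);
 *   nvmem_device_put(nvmem);
 *   if (ret < 0)
 *           return ret;
 */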
1034 
1035 /**
1036  * nvmem_device_find() - Find nvmem device with matching function
1037  *
1038  * @data: Data to pass to match function
1039  * @match: Callback function to check device
1040  *
1041  * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
1042  * on success.
1043  */
1044 struct nvmem_device *nvmem_device_find(void *data,
1045 			int (*match)(struct device *dev, const void *data))
1046 {
1047 	return __nvmem_device_get(data, match);
1048 }
1049 EXPORT_SYMBOL_GPL(nvmem_device_find);
1050 
1051 static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
1052 {
1053 	struct nvmem_device **nvmem = res;
1054 
1055 	if (WARN_ON(!nvmem || !*nvmem))
1056 		return 0;
1057 
1058 	return *nvmem == data;
1059 }
1060 
1061 static void devm_nvmem_device_release(struct device *dev, void *res)
1062 {
1063 	nvmem_device_put(*(struct nvmem_device **)res);
1064 }
1065 
1066 /**
1067  * devm_nvmem_device_put() - put an already-obtained nvmem device
1068  *
1069  * @dev: Device that uses the nvmem device.
1070  * @nvmem: pointer to nvmem device allocated by devm_nvmem_cell_get(),
1071  * that needs to be released.
1072  */
1073 void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
1074 {
1075 	int ret;
1076 
1077 	ret = devres_release(dev, devm_nvmem_device_release,
1078 			     devm_nvmem_device_match, nvmem);
1079 
1080 	WARN_ON(ret);
1081 }
1082 EXPORT_SYMBOL_GPL(devm_nvmem_device_put);
1083 
1084 /**
1085  * nvmem_device_put() - put an already-obtained nvmem device
1086  *
1087  * @nvmem: pointer to nvmem device that needs to be released.
1088  */
1089 void nvmem_device_put(struct nvmem_device *nvmem)
1090 {
1091 	__nvmem_device_put(nvmem);
1092 }
1093 EXPORT_SYMBOL_GPL(nvmem_device_put);
1094 
1095 /**
1096  * devm_nvmem_device_get() - Get nvmem device from a given id
1097  *
1098  * @dev: Device that requests the nvmem device.
1099  * @id: name id for the requested nvmem device.
1100  *
1101  * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
1102  * on success.  The nvmem_device will be released automatically once the
1103  * device is freed.
1104  */
1105 struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
1106 {
1107 	struct nvmem_device **ptr, *nvmem;
1108 
1109 	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
1110 	if (!ptr)
1111 		return ERR_PTR(-ENOMEM);
1112 
1113 	nvmem = nvmem_device_get(dev, id);
1114 	if (!IS_ERR(nvmem)) {
1115 		*ptr = nvmem;
1116 		devres_add(dev, ptr);
1117 	} else {
1118 		devres_free(ptr);
1119 	}
1120 
1121 	return nvmem;
1122 }
1123 EXPORT_SYMBOL_GPL(devm_nvmem_device_get);
1124 
1125 static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry, const char *id)
1126 {
1127 	struct nvmem_cell *cell;
1128 	const char *name = NULL;
1129 
1130 	cell = kzalloc(sizeof(*cell), GFP_KERNEL);
1131 	if (!cell)
1132 		return ERR_PTR(-ENOMEM);
1133 
1134 	if (id) {
1135 		name = kstrdup_const(id, GFP_KERNEL);
1136 		if (!name) {
1137 			kfree(cell);
1138 			return ERR_PTR(-ENOMEM);
1139 		}
1140 	}
1141 
1142 	cell->id = name;
1143 	cell->entry = entry;
1144 
1145 	return cell;
1146 }
1147 
1148 static struct nvmem_cell *
1149 nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
1150 {
1151 	struct nvmem_cell_entry *cell_entry;
1152 	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
1153 	struct nvmem_cell_lookup *lookup;
1154 	struct nvmem_device *nvmem;
1155 	const char *dev_id;
1156 
1157 	if (!dev)
1158 		return ERR_PTR(-EINVAL);
1159 
1160 	dev_id = dev_name(dev);
1161 
1162 	mutex_lock(&nvmem_lookup_mutex);
1163 
1164 	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
1165 		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
1166 		    (strcmp(lookup->con_id, con_id) == 0)) {
1167 			/* This is the right entry. */
1168 			nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
1169 						   device_match_name);
1170 			if (IS_ERR(nvmem)) {
1171 				/* Provider may not be registered yet. */
1172 				cell = ERR_CAST(nvmem);
1173 				break;
1174 			}
1175 
1176 			cell_entry = nvmem_find_cell_entry_by_name(nvmem,
1177 								   lookup->cell_name);
1178 			if (!cell_entry) {
1179 				__nvmem_device_put(nvmem);
1180 				cell = ERR_PTR(-ENOENT);
1181 			} else {
1182 				cell = nvmem_create_cell(cell_entry, con_id);
1183 				if (IS_ERR(cell))
1184 					__nvmem_device_put(nvmem);
1185 			}
1186 			break;
1187 		}
1188 	}
1189 
1190 	mutex_unlock(&nvmem_lookup_mutex);
1191 	return cell;
1192 }
1193 
1194 #if IS_ENABLED(CONFIG_OF)
1195 static struct nvmem_cell_entry *
1196 nvmem_find_cell_entry_by_node(struct nvmem_device *nvmem, struct device_node *np)
1197 {
1198 	struct nvmem_cell_entry *iter, *cell = NULL;
1199 
1200 	mutex_lock(&nvmem_mutex);
1201 	list_for_each_entry(iter, &nvmem->cells, node) {
1202 		if (np == iter->np) {
1203 			cell = iter;
1204 			break;
1205 		}
1206 	}
1207 	mutex_unlock(&nvmem_mutex);
1208 
1209 	return cell;
1210 }
1211 
1212 /**
1213  * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
1214  *
1215  * @np: Device tree node that uses the nvmem cell.
1216  * @id: nvmem cell name from nvmem-cell-names property, or NULL
1217  *      for the cell at index 0 (the lone cell with no accompanying
1218  *      nvmem-cell-names property).
1219  *
1220  * Return: Will be an ERR_PTR() on error or a valid pointer
1221  * to a struct nvmem_cell.  The nvmem_cell must be released with
1222  * nvmem_cell_put().
1223  */
1224 struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
1225 {
1226 	struct device_node *cell_np, *nvmem_np;
1227 	struct nvmem_device *nvmem;
1228 	struct nvmem_cell_entry *cell_entry;
1229 	struct nvmem_cell *cell;
1230 	int index = 0;
1231 
1232 	/* if cell name exists, find index to the name */
1233 	if (id)
1234 		index = of_property_match_string(np, "nvmem-cell-names", id);
1235 
1236 	cell_np = of_parse_phandle(np, "nvmem-cells", index);
1237 	if (!cell_np)
1238 		return ERR_PTR(-ENOENT);
1239 
1240 	nvmem_np = of_get_parent(cell_np);
1241 	if (!nvmem_np) {
1242 		of_node_put(cell_np);
1243 		return ERR_PTR(-EINVAL);
1244 	}
1245 
1246 	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
1247 	of_node_put(nvmem_np);
1248 	if (IS_ERR(nvmem)) {
1249 		of_node_put(cell_np);
1250 		return ERR_CAST(nvmem);
1251 	}
1252 
1253 	cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np);
1254 	of_node_put(cell_np);
1255 	if (!cell_entry) {
1256 		__nvmem_device_put(nvmem);
1257 		return ERR_PTR(-ENOENT);
1258 	}
1259 
1260 	cell = nvmem_create_cell(cell_entry, id);
1261 	if (IS_ERR(cell))
1262 		__nvmem_device_put(nvmem);
1263 
1264 	return cell;
1265 }
1266 EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
1267 #endif
1268 
1269 /**
1270  * nvmem_cell_get() - Get the nvmem cell of a device from a given cell name
1271  *
1272  * @dev: Device that requests the nvmem cell.
1273  * @id: nvmem cell name to get (this corresponds with the name from the
1274  *      nvmem-cell-names property for DT systems and with the con_id from
1275  *      the lookup entry for non-DT systems).
1276  *
1277  * Return: Will be an ERR_PTR() on error or a valid pointer
1278  * to a struct nvmem_cell.  The nvmem_cell must be released with
1279  * nvmem_cell_put().
1280  */
1281 struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
1282 {
1283 	struct nvmem_cell *cell;
1284 
1285 	if (dev->of_node) { /* try dt first */
1286 		cell = of_nvmem_cell_get(dev->of_node, id);
1287 		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
1288 			return cell;
1289 	}
1290 
1291 	/* NULL cell id only allowed for device tree; invalid otherwise */
1292 	if (!id)
1293 		return ERR_PTR(-EINVAL);
1294 
1295 	return nvmem_cell_get_from_lookup(dev, id);
1296 }
1297 EXPORT_SYMBOL_GPL(nvmem_cell_get);
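
/*
 * Cell-level consumer sketch (hypothetical "calib" cell): the id is
 * matched against the nvmem-cell-names property on DT systems, or
 * against the con_id of a registered lookup entry otherwise.
 *
 *   struct nvmem_cell *cell;
 *   size_t len;
 *   u8 *data;
 *
 *   cell = nvmem_cell_get(dev, "calib");
 *   if (IS_ERR(cell))
 *           return PTR_ERR(cell);
 *
 *   data = nvmem_cell_read(cell, &len);
 *   nvmem_cell_put(cell);
 *   if (IS_ERR(data))
 *           return PTR_ERR(data);
 *
 *   (use data[0] .. data[len - 1], then kfree(data))
 */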
1298 
1299 static void devm_nvmem_cell_release(struct device *dev, void *res)
1300 {
1301 	nvmem_cell_put(*(struct nvmem_cell **)res);
1302 }
1303 
1304 /**
1305  * devm_nvmem_cell_get() - Get the nvmem cell of a device from a given id
1306  *
1307  * @dev: Device that requests the nvmem cell.
1308  * @id: nvmem cell name id to get.
1309  *
1310  * Return: Will be an ERR_PTR() on error or a valid pointer
1311  * to a struct nvmem_cell.  The nvmem_cell will be freed
1312  * automatically once the device is freed.
1313  */
1314 struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
1315 {
1316 	struct nvmem_cell **ptr, *cell;
1317 
1318 	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
1319 	if (!ptr)
1320 		return ERR_PTR(-ENOMEM);
1321 
1322 	cell = nvmem_cell_get(dev, id);
1323 	if (!IS_ERR(cell)) {
1324 		*ptr = cell;
1325 		devres_add(dev, ptr);
1326 	} else {
1327 		devres_free(ptr);
1328 	}
1329 
1330 	return cell;
1331 }
1332 EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);
1333 
1334 static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
1335 {
1336 	struct nvmem_cell **c = res;
1337 
1338 	if (WARN_ON(!c || !*c))
1339 		return 0;
1340 
1341 	return *c == data;
1342 }
1343 
1344 /**
1345  * devm_nvmem_cell_put() - Release previously allocated nvmem cell
1346  * from devm_nvmem_cell_get.
1347  *
1348  * @dev: Device that requests the nvmem cell.
1349  * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
1350  */
1351 void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
1352 {
1353 	int ret;
1354 
1355 	ret = devres_release(dev, devm_nvmem_cell_release,
1356 				devm_nvmem_cell_match, cell);
1357 
1358 	WARN_ON(ret);
1359 }
1360 EXPORT_SYMBOL(devm_nvmem_cell_put);
1361 
1362 /**
1363  * nvmem_cell_put() - Release previously allocated nvmem cell.
1364  *
1365  * @cell: Previously allocated nvmem cell by nvmem_cell_get().
1366  */
1367 void nvmem_cell_put(struct nvmem_cell *cell)
1368 {
1369 	struct nvmem_device *nvmem = cell->entry->nvmem;
1370 
1371 	if (cell->id)
1372 		kfree_const(cell->id);
1373 
1374 	kfree(cell);
1375 	__nvmem_device_put(nvmem);
1376 }
1377 EXPORT_SYMBOL_GPL(nvmem_cell_put);
1378 
1379 static void nvmem_shift_read_buffer_in_place(struct nvmem_cell_entry *cell, void *buf)
1380 {
1381 	u8 *p, *b;
1382 	int i, extra, bit_offset = cell->bit_offset;
1383 
1384 	p = b = buf;
1385 	if (bit_offset) {
1386 		/* First shift */
1387 		*b++ >>= bit_offset;
1388 
1389 		/* setup rest of the bytes if any */
1390 		for (i = 1; i < cell->bytes; i++) {
1391 			/* Get bits from next byte and shift them towards msb */
1392 			*p |= *b << (BITS_PER_BYTE - bit_offset);
1393 
1394 			p = b;
1395 			*b++ >>= bit_offset;
1396 		}
1397 	} else {
1398 		/* point to the msb */
1399 		p += cell->bytes - 1;
1400 	}
1401 
1402 	/* result fits in less bytes */
1403 	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
1404 	while (--extra >= 0)
1405 		*p-- = 0;
1406 
1407 	/* clear msb bits if any leftover in the last byte */
1408 	if (cell->nbits % BITS_PER_BYTE)
1409 		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
1410 }
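
/*
 * Worked example: a cell with bit_offset = 2 and nbits = 10 spans two
 * bytes.  For a raw read of { 0xb4, 0x07 } the code above first shifts
 * byte 0 right (0xb4 >> 2 = 0x2d), folds the low two bits of byte 1
 * into its top bits (0x2d | 0xc0 = 0xed), shifts byte 1 right
 * (0x07 >> 2 = 0x01) and finally masks the last byte with
 * GENMASK(1, 0), leaving { 0xed, 0x01 }, i.e. the 10-bit value 0x1ed.
 */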
1411 
1412 static int __nvmem_cell_read(struct nvmem_device *nvmem,
1413 		      struct nvmem_cell_entry *cell,
1414 		      void *buf, size_t *len, const char *id)
1415 {
1416 	int rc;
1417 
1418 	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);
1419 
1420 	if (rc)
1421 		return rc;
1422 
1423 	/* shift bits in-place */
1424 	if (cell->bit_offset || cell->nbits)
1425 		nvmem_shift_read_buffer_in_place(cell, buf);
1426 
1427 	if (nvmem->cell_post_process) {
1428 		rc = nvmem->cell_post_process(nvmem->priv, id,
1429 					      cell->offset, buf, cell->bytes);
1430 		if (rc)
1431 			return rc;
1432 	}
1433 
1434 	if (len)
1435 		*len = cell->bytes;
1436 
1437 	return 0;
1438 }
1439 
1440 /**
1441  * nvmem_cell_read() - Read a given nvmem cell
1442  *
1443  * @cell: nvmem cell to be read.
1444  * @len: pointer to length of cell which will be populated on successful read;
1445  *	 can be NULL.
1446  *
1447  * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
1448  * buffer should be freed by the consumer with a kfree().
1449  */
1450 void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
1451 {
1452 	struct nvmem_device *nvmem = cell->entry->nvmem;
1453 	u8 *buf;
1454 	int rc;
1455 
1456 	if (!nvmem)
1457 		return ERR_PTR(-EINVAL);
1458 
1459 	buf = kzalloc(cell->entry->bytes, GFP_KERNEL);
1460 	if (!buf)
1461 		return ERR_PTR(-ENOMEM);
1462 
1463 	rc = __nvmem_cell_read(nvmem, cell->entry, buf, len, cell->id);
1464 	if (rc) {
1465 		kfree(buf);
1466 		return ERR_PTR(rc);
1467 	}
1468 
1469 	return buf;
1470 }
1471 EXPORT_SYMBOL_GPL(nvmem_cell_read);
1472 
1473 static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell_entry *cell,
1474 					     u8 *_buf, int len)
1475 {
1476 	struct nvmem_device *nvmem = cell->nvmem;
1477 	int i, rc, nbits, bit_offset = cell->bit_offset;
1478 	u8 v, *p, *buf, *b, pbyte, pbits;
1479 
1480 	nbits = cell->nbits;
1481 	buf = kzalloc(cell->bytes, GFP_KERNEL);
1482 	if (!buf)
1483 		return ERR_PTR(-ENOMEM);
1484 
1485 	memcpy(buf, _buf, len);
1486 	p = b = buf;
1487 
1488 	if (bit_offset) {
1489 		pbyte = *b;
1490 		*b <<= bit_offset;
1491 
1492 		/* setup the first byte with lsb bits from nvmem */
1493 		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
1494 		if (rc)
1495 			goto err;
1496 		*b++ |= GENMASK(bit_offset - 1, 0) & v;
1497 
1498 		/* setup rest of the byte if any */
1499 		for (i = 1; i < cell->bytes; i++) {
1500 			/* Get last byte bits and shift them towards lsb */
1501 			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
1502 			pbyte = *b;
1503 			p = b;
1504 			*b <<= bit_offset;
1505 			*b++ |= pbits;
1506 		}
1507 	}
1508 
1509 	/* if it's not end on byte boundary */
1510 	if ((nbits + bit_offset) % BITS_PER_BYTE) {
1511 		/* setup the last byte with msb bits from nvmem */
1512 		rc = nvmem_reg_read(nvmem,
1513 				    cell->offset + cell->bytes - 1, &v, 1);
1514 		if (rc)
1515 			goto err;
1516 		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
1517 
1518 	}
1519 
1520 	return buf;
1521 err:
1522 	kfree(buf);
1523 	return ERR_PTR(rc);
1524 }
1525 
1526 static int __nvmem_cell_entry_write(struct nvmem_cell_entry *cell, void *buf, size_t len)
1527 {
1528 	struct nvmem_device *nvmem = cell->nvmem;
1529 	int rc;
1530 
1531 	if (!nvmem || nvmem->read_only ||
1532 	    (cell->bit_offset == 0 && len != cell->bytes))
1533 		return -EINVAL;
1534 
1535 	if (cell->bit_offset || cell->nbits) {
1536 		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
1537 		if (IS_ERR(buf))
1538 			return PTR_ERR(buf);
1539 	}
1540 
1541 	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);
1542 
1543 	/* free the tmp buffer */
1544 	if (cell->bit_offset || cell->nbits)
1545 		kfree(buf);
1546 
1547 	if (rc)
1548 		return rc;
1549 
1550 	return len;
1551 }
1552 
1553 /**
1554  * nvmem_cell_write() - Write to a given nvmem cell
1555  *
1556  * @cell: nvmem cell to be written.
1557  * @buf: Buffer to be written.
1558  * @len: length of buffer to be written to nvmem cell.
1559  *
1560  * Return: length of bytes written or negative on failure.
1561  */
1562 int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
1563 {
1564 	return __nvmem_cell_entry_write(cell->entry, buf, len);
1565 }
1566 
1567 EXPORT_SYMBOL_GPL(nvmem_cell_write);
1568 
1569 static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
1570 				  void *val, size_t count)
1571 {
1572 	struct nvmem_cell *cell;
1573 	void *buf;
1574 	size_t len;
1575 
1576 	cell = nvmem_cell_get(dev, cell_id);
1577 	if (IS_ERR(cell))
1578 		return PTR_ERR(cell);
1579 
1580 	buf = nvmem_cell_read(cell, &len);
1581 	if (IS_ERR(buf)) {
1582 		nvmem_cell_put(cell);
1583 		return PTR_ERR(buf);
1584 	}
1585 	if (len != count) {
1586 		kfree(buf);
1587 		nvmem_cell_put(cell);
1588 		return -EINVAL;
1589 	}
1590 	memcpy(val, buf, count);
1591 	kfree(buf);
1592 	nvmem_cell_put(cell);
1593 
1594 	return 0;
1595 }
1596 
1597 /**
1598  * nvmem_cell_read_u8() - Read a cell value as a u8
1599  *
1600  * @dev: Device that requests the nvmem cell.
1601  * @cell_id: Name of nvmem cell to read.
1602  * @val: pointer to output value.
1603  *
1604  * Return: 0 on success or negative errno.
1605  */
1606 int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
1607 {
1608 	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
1609 }
1610 EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);
1611 
1612 /**
1613  * nvmem_cell_read_u16() - Read a cell value as a u16
1614  *
1615  * @dev: Device that requests the nvmem cell.
1616  * @cell_id: Name of nvmem cell to read.
1617  * @val: pointer to output value.
1618  *
1619  * Return: 0 on success or negative errno.
1620  */
1621 int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
1622 {
1623 	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
1624 }
1625 EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);
1626 
1627 /**
1628  * nvmem_cell_read_u32() - Read a cell value as a u32
1629  *
1630  * @dev: Device that requests the nvmem cell.
1631  * @cell_id: Name of nvmem cell to read.
1632  * @val: pointer to output value.
1633  *
1634  * Return: 0 on success or negative errno.
1635  */
1636 int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
1637 {
1638 	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
1639 }
1640 EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);
1641 
1642 /**
1643  * nvmem_cell_read_u64() - Read a cell value as a u64
1644  *
1645  * @dev: Device that requests the nvmem cell.
1646  * @cell_id: Name of nvmem cell to read.
1647  * @val: pointer to output value.
1648  *
1649  * Return: 0 on success or negative errno.
1650  */
1651 int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
1652 {
1653 	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
1654 }
1655 EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);
1656 
1657 static const void *nvmem_cell_read_variable_common(struct device *dev,
1658 						   const char *cell_id,
1659 						   size_t max_len, size_t *len)
1660 {
1661 	struct nvmem_cell *cell;
1662 	int nbits;
1663 	void *buf;
1664 
1665 	cell = nvmem_cell_get(dev, cell_id);
1666 	if (IS_ERR(cell))
1667 		return cell;
1668 
1669 	nbits = cell->entry->nbits;
1670 	buf = nvmem_cell_read(cell, len);
1671 	nvmem_cell_put(cell);
1672 	if (IS_ERR(buf))
1673 		return buf;
1674 
1675 	/*
1676 	 * If nbits is set then nvmem_cell_read() can significantly exaggerate
1677 	 * the length of the real data. Throw away the extra junk.
1678 	 */
1679 	if (nbits)
1680 		*len = DIV_ROUND_UP(nbits, 8);
1681 
1682 	if (*len > max_len) {
1683 		kfree(buf);
1684 		return ERR_PTR(-ERANGE);
1685 	}
1686 
1687 	return buf;
1688 }
1689 
1690 /**
1691  * nvmem_cell_read_variable_le_u32() - Read up to 32-bits of data as a little endian number.
1692  *
1693  * @dev: Device that requests the nvmem cell.
1694  * @cell_id: Name of nvmem cell to read.
1695  * @val: pointer to output value.
1696  *
1697  * Return: 0 on success or negative errno.
1698  */
1699 int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
1700 				    u32 *val)
1701 {
1702 	size_t len;
1703 	const u8 *buf;
1704 	int i;
1705 
1706 	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
1707 	if (IS_ERR(buf))
1708 		return PTR_ERR(buf);
1709 
1710 	/* Copy w/ implicit endian conversion */
1711 	*val = 0;
1712 	for (i = 0; i < len; i++)
1713 		*val |= buf[i] << (8 * i);
1714 
1715 	kfree(buf);
1716 
1717 	return 0;
1718 }
1719 EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u32);
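
/*
 * Example: for a 3-byte cell holding { 0x56, 0x34, 0x12 } the loop
 * above assembles *val = 0x56 | (0x34 << 8) | (0x12 << 16) = 0x123456,
 * i.e. the buffer is treated as a little-endian number of at most
 * sizeof(*val) bytes.
 */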
1720 
1721 /**
1722  * nvmem_cell_read_variable_le_u64() - Read up to 64-bits of data as a little endian number.
1723  *
1724  * @dev: Device that requests the nvmem cell.
1725  * @cell_id: Name of nvmem cell to read.
1726  * @val: pointer to output value.
1727  *
1728  * Return: 0 on success or negative errno.
1729  */
1730 int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
1731 				    u64 *val)
1732 {
1733 	size_t len;
1734 	const u8 *buf;
1735 	int i;
1736 
1737 	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
1738 	if (IS_ERR(buf))
1739 		return PTR_ERR(buf);
1740 
1741 	/* Copy w/ implicit endian conversion */
1742 	*val = 0;
1743 	for (i = 0; i < len; i++)
1744 		*val |= (uint64_t)buf[i] << (8 * i);
1745 
1746 	kfree(buf);
1747 
1748 	return 0;
1749 }
1750 EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u64);
1751 
1752 /**
1753  * nvmem_device_cell_read() - Read a given nvmem device and cell
1754  *
1755  * @nvmem: nvmem device to read from.
1756  * @info: nvmem cell info to be read.
1757  * @buf: buffer pointer which will be populated on successful read.
1758  *
1759  * Return: number of bytes read on success and a negative
1760  * error code on error.
1761  */
1762 ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
1763 			   struct nvmem_cell_info *info, void *buf)
1764 {
1765 	struct nvmem_cell_entry cell;
1766 	int rc;
1767 	ssize_t len;
1768 
1769 	if (!nvmem)
1770 		return -EINVAL;
1771 
1772 	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
1773 	if (rc)
1774 		return rc;
1775 
1776 	rc = __nvmem_cell_read(nvmem, &cell, buf, &len, NULL);
1777 	if (rc)
1778 		return rc;
1779 
1780 	return len;
1781 }
1782 EXPORT_SYMBOL_GPL(nvmem_device_cell_read);
1783 
1784 /**
1785  * nvmem_device_cell_write() - Write cell to a given nvmem device
1786  *
1787  * @nvmem: nvmem device to be written to.
1788  * @info: nvmem cell info to be written.
1789  * @buf: buffer to be written to cell.
1790  *
1791  * Return: length of bytes written or negative error code on failure.
1792  */
1793 int nvmem_device_cell_write(struct nvmem_device *nvmem,
1794 			    struct nvmem_cell_info *info, void *buf)
1795 {
1796 	struct nvmem_cell_entry cell;
1797 	int rc;
1798 
1799 	if (!nvmem)
1800 		return -EINVAL;
1801 
1802 	rc = nvmem_cell_info_to_nvmem_cell_entry_nodup(nvmem, info, &cell);
1803 	if (rc)
1804 		return rc;
1805 
1806 	return __nvmem_cell_entry_write(&cell, buf, cell.bytes);
1807 }
1808 EXPORT_SYMBOL_GPL(nvmem_device_cell_write);
1809 
1810 /**
1811  * nvmem_device_read() - Read from a given nvmem device
1812  *
1813  * @nvmem: nvmem device to read from.
1814  * @offset: offset in nvmem device.
1815  * @bytes: number of bytes to read.
1816  * @buf: buffer pointer which will be populated on successful read.
1817  *
1818  * Return: number of bytes read on success and a negative
1819  * error code on error.
1820  */
1821 int nvmem_device_read(struct nvmem_device *nvmem,
1822 		      unsigned int offset,
1823 		      size_t bytes, void *buf)
1824 {
1825 	int rc;
1826 
1827 	if (!nvmem)
1828 		return -EINVAL;
1829 
1830 	rc = nvmem_reg_read(nvmem, offset, buf, bytes);
1831 
1832 	if (rc)
1833 		return rc;
1834 
1835 	return bytes;
1836 }
1837 EXPORT_SYMBOL_GPL(nvmem_device_read);
1838 
1839 /**
1840  * nvmem_device_write() - Write cell to a given nvmem device
1841  *
1842  * @nvmem: nvmem device to be written to.
1843  * @offset: offset in nvmem device.
1844  * @bytes: number of bytes to write.
1845  * @buf: buffer to be written.
1846  *
1847  * Return: length of bytes written or negative error code on failure.
1848  */
1849 int nvmem_device_write(struct nvmem_device *nvmem,
1850 		       unsigned int offset,
1851 		       size_t bytes, void *buf)
1852 {
1853 	int rc;
1854 
1855 	if (!nvmem)
1856 		return -EINVAL;
1857 
1858 	rc = nvmem_reg_write(nvmem, offset, buf, bytes);
1859 
1860 	if (rc)
1861 		return rc;
1862 
1863 
1864 	return bytes;
1865 }
1866 EXPORT_SYMBOL_GPL(nvmem_device_write);
1867 
1868 /**
1869  * nvmem_add_cell_table() - register a table of cell info entries
1870  *
1871  * @table: table of cell info entries
1872  */
1873 void nvmem_add_cell_table(struct nvmem_cell_table *table)
1874 {
1875 	mutex_lock(&nvmem_cell_mutex);
1876 	list_add_tail(&table->node, &nvmem_cell_tables);
1877 	mutex_unlock(&nvmem_cell_mutex);
1878 }
1879 EXPORT_SYMBOL_GPL(nvmem_add_cell_table);
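
/*
 * Sketch of a non-DT cell table registration (hypothetical "foo"
 * names).  The table is matched against the nvmem device name by
 * nvmem_add_cells_from_table() when the provider registers:
 *
 *   static struct nvmem_cell_info foo_cells[] = {
 *           {
 *                   .name   = "mac-address",
 *                   .offset = 0x90,
 *                   .bytes  = 6,
 *           },
 *   };
 *
 *   static struct nvmem_cell_table foo_cell_table = {
 *           .nvmem_name = "foo-efuse0",
 *           .cells      = foo_cells,
 *           .ncells     = ARRAY_SIZE(foo_cells),
 *   };
 *
 *   nvmem_add_cell_table(&foo_cell_table);
 */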
1880 
1881 /**
1882  * nvmem_del_cell_table() - remove a previously registered cell info table
1883  *
1884  * @table: table of cell info entries
1885  */
1886 void nvmem_del_cell_table(struct nvmem_cell_table *table)
1887 {
1888 	mutex_lock(&nvmem_cell_mutex);
1889 	list_del(&table->node);
1890 	mutex_unlock(&nvmem_cell_mutex);
1891 }
1892 EXPORT_SYMBOL_GPL(nvmem_del_cell_table);
1893 
1894 /**
1895  * nvmem_add_cell_lookups() - register a list of cell lookup entries
1896  *
1897  * @entries: array of cell lookup entries
1898  * @nentries: number of cell lookup entries in the array
1899  */
1900 void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
1901 {
1902 	int i;
1903 
1904 	mutex_lock(&nvmem_lookup_mutex);
1905 	for (i = 0; i < nentries; i++)
1906 		list_add_tail(&entries[i].node, &nvmem_lookup_list);
1907 	mutex_unlock(&nvmem_lookup_mutex);
1908 }
1909 EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);
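
/*
 * Sketch of lookup entries wiring a consumer to a cell on a non-DT
 * system (hypothetical names).  nvmem_cell_get_from_lookup() matches
 * dev_id against the consumer's device name and con_id against the id
 * passed to nvmem_cell_get():
 *
 *   static struct nvmem_cell_lookup foo_lookups[] = {
 *           {
 *                   .nvmem_name = "foo-efuse0",
 *                   .cell_name  = "mac-address",
 *                   .dev_id     = "foo-eth.0",
 *                   .con_id     = "mac-address",
 *           },
 *   };
 *
 *   nvmem_add_cell_lookups(foo_lookups, ARRAY_SIZE(foo_lookups));
 */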
1910 
1911 /**
1912  * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
1913  *                            entries
1914  *
1915  * @entries: array of cell lookup entries
1916  * @nentries: number of cell lookup entries in the array
1917  */
1918 void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
1919 {
1920 	int i;
1921 
1922 	mutex_lock(&nvmem_lookup_mutex);
1923 	for (i = 0; i < nentries; i++)
1924 		list_del(&entries[i].node);
1925 	mutex_unlock(&nvmem_lookup_mutex);
1926 }
1927 EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);
1928 
1929 /**
1930  * nvmem_dev_name() - Get the name of a given nvmem device.
1931  *
1932  * @nvmem: nvmem device.
1933  *
1934  * Return: name of the nvmem device.
1935  */
1936 const char *nvmem_dev_name(struct nvmem_device *nvmem)
1937 {
1938 	return dev_name(&nvmem->dev);
1939 }
1940 EXPORT_SYMBOL_GPL(nvmem_dev_name);
1941 
1942 static int __init nvmem_init(void)
1943 {
1944 	return bus_register(&nvmem_bus_type);
1945 }
1946 
1947 static void __exit nvmem_exit(void)
1948 {
1949 	bus_unregister(&nvmem_bus_type);
1950 }
1951 
1952 subsys_initcall(nvmem_init);
1953 module_exit(nvmem_exit);
1954 
1955 MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org");
1956 MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com");
1957 MODULE_DESCRIPTION("nvmem Driver Core");
1958 MODULE_LICENSE("GPL v2");
1959