// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/slab.h>

struct nvmem_device {
	struct module *owner;
	struct device dev;
	int stride;
	int word_size;
	int id;
	struct kref refcnt;
	size_t size;
	bool read_only;
	bool root_only;
	int flags;
	enum nvmem_type type;
	struct bin_attribute eeprom;
	struct device *base_dev;
	struct list_head cells;
	nvmem_reg_read_t reg_read;
	nvmem_reg_write_t reg_write;
	struct gpio_desc *wp_gpio;
	void *priv;
};

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

#define FLAG_COMPAT BIT(0)

struct nvmem_cell {
	const char *name;
	int offset;
	int bytes;
	int bit_offset;
	int nbits;
	struct device_node *np;
	struct nvmem_device *nvmem;
	struct list_head node;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset, void *val, size_t bytes)
{
	if (nvmem->reg_read) {
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);
	}

	return -EINVAL;
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset, void *val, size_t bytes)
{
	int ret;

	if (nvmem->reg_write) {
		gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
		ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
		gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
		return ret;
	}

	return -EINVAL;
}

#ifdef CONFIG_NVMEM_SYSFS
static const char *const nvmem_type_str[] = {
	[NVMEM_TYPE_UNKNOWN] = "Unknown",
	[NVMEM_TYPE_EEPROM] = "EEPROM",
	[NVMEM_TYPE_OTP] = "OTP",
	[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif

static ssize_t type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
}

static DEVICE_ATTR_RO(type);

static struct attribute *nvmem_attrs[] = {
	&dev_attr_type.attr,
	NULL,
};

static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf,
				   loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private) {
		dev = attr->private;
	} else {
		dev = kobj_to_dev(kobj);
	}
	nvmem = to_nvmem_device(dev);

	/* Stop the user from reading */
	if (pos >= nvmem->size) {
		return 0;
	}

	if (!IS_ALIGNED(pos, nvmem->stride)) {
		return -EINVAL;
	}

	if (count < nvmem->word_size) {
		return -EINVAL;
	}

	if (pos + count > nvmem->size) {
		count = nvmem->size - pos;
	}

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_read) {
		return -EPERM;
	}
	rc = nvmem_reg_read(nvmem, pos, buf, count);
	if (rc) {
		return rc;
	}

	return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj, struct bin_attribute *attr, char *buf,
				    loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private) {
		dev = attr->private;
	} else {
		dev = kobj_to_dev(kobj);
	}
	nvmem = to_nvmem_device(dev);

	/* Stop the user from writing */
	if (pos >= nvmem->size) {
		return -EFBIG;
	}

	if (!IS_ALIGNED(pos, nvmem->stride)) {
		return -EINVAL;
	}

	if (count < nvmem->word_size) {
		return -EINVAL;
	}

	if (pos + count > nvmem->size) {
		count = nvmem->size - pos;
	}

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_write) {
		return -EPERM;
	}
	rc = nvmem_reg_write(nvmem, pos, buf, count);
	if (rc) {
		return rc;
	}

	return count;
}

static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
{
	umode_t mode = 0400;

	if (!nvmem->root_only) {
		mode |= 0044;
	}

	if (!nvmem->read_only) {
		mode |= 0200;
	}

	if (!nvmem->reg_write) {
		mode &= ~0200;
	}

	if (!nvmem->reg_read) {
		mode &= ~0444;
	}

	return mode;
}

static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj, struct bin_attribute *attr, int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	attr->size = nvmem->size;

	return nvmem_bin_attr_get_umode(nvmem);
}

/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
	.attr = {
		.name = "nvmem",
		.mode = 0644,
	},
	.read = bin_attr_nvmem_read,
	.write = bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_group = {
	.bin_attrs = nvmem_bin_attributes,
	.attrs = nvmem_attrs,
	.is_bin_visible = nvmem_bin_attr_is_visible,
};

static const struct attribute_group *nvmem_dev_groups[] = {
	&nvmem_bin_group,
	NULL,
};

static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
	.attr = {
		.name = "eeprom",
	},
	.read = bin_attr_nvmem_read,
	.write = bin_attr_nvmem_write,
};

/*
 * nvmem_sysfs_setup_compat() - Create an additional binary entry in
 * the driver's sysfs directory, to be backwards compatible with the
 * older drivers/misc/eeprom drivers.
 */
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem, const struct nvmem_config *config)
{
	int rval;

	if (!config->compat) {
		return 0;
	}

	if (!config->base_dev) {
		return -EINVAL;
	}

	nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
	nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev, "Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	nvmem->flags |= FLAG_COMPAT;

	return 0;
}

static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem, const struct nvmem_config *config)
{
	if (config->compat) {
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
	}
}

#else /* CONFIG_NVMEM_SYSFS */

static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem, const struct nvmem_config *config)
{
	return -ENOSYS;
}
static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem, const struct nvmem_config *config)
{
}

#endif /* CONFIG_NVMEM_SYSFS */

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_free(&nvmem_ida, nvmem->id);
	gpiod_put(nvmem->wp_gpio);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release = nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name = "nvmem",
};

static void nvmem_cell_drop(struct nvmem_cell *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	kfree_const(cell->name);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node) {
		nvmem_cell_drop(cell);
	}
}

static void nvmem_cell_add(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell_nodup(struct nvmem_device *nvmem, const struct nvmem_cell_info *info,
					       struct nvmem_cell *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->bytes = info->bytes;
	cell->name = info->name;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;

	if (cell->nbits) {
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset, BITS_PER_BYTE);
	}

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev, "cell %s unaligned to nvmem stride %d\n", cell->name ?: "<unknown>", nvmem->stride);
		return -EINVAL;
	}

	return 0;
}

static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem, const struct nvmem_cell_info *info,
					 struct nvmem_cell *cell)
{
	int err;

	err = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, cell);
	if (err) {
		return err;
	}

	cell->name = kstrdup_const(info->name, GFP_KERNEL);
	if (!cell->name) {
		return -ENOMEM;
	}

	return 0;
}

/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem, const struct nvmem_cell_info *info, int ncells)
{
	struct nvmem_cell **cells;
	int i, rval;

	cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
	if (!cells) {
		return -ENOMEM;
	}

	for (i = 0; i < ncells; i++) {
		cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
		if (!cells[i]) {
			rval = -ENOMEM;
			goto err;
		}

		rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
		if (rval) {
			kfree(cells[i]);
			goto err;
		}

		nvmem_cell_add(cells[i]);
	}

	/* remove tmp array */
	kfree(cells);

	return 0;
err:
	while (i--) {
		nvmem_cell_drop(cells[i]);
	}

	kfree(cells);

	return rval;
}

/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);
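
/*
 * Example: listening for nvmem events. This is an illustrative sketch,
 * not code from this file; the "foo" names are hypothetical. A listener
 * learns about providers and cells coming and going by registering a
 * notifier block; the action codes (NVMEM_ADD, NVMEM_REMOVE,
 * NVMEM_CELL_ADD, NVMEM_CELL_REMOVE) are the ones delivered by the
 * blocking_notifier_call_chain() calls in this file.
 *
 *	static int foo_nvmem_event(struct notifier_block *nb,
 *				   unsigned long event, void *data)
 *	{
 *		switch (event) {
 *		case NVMEM_ADD:
 *			pr_info("nvmem device added\n");
 *			break;
 *		case NVMEM_CELL_ADD:
 *			pr_info("nvmem cell added\n");
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nvmem_nb = {
 *		.notifier_call = foo_nvmem_event,
 *	};
 *
 *	nvmem_register_notifier(&foo_nvmem_nb);
 */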

/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);

static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
	const struct nvmem_cell_info *info;
	struct nvmem_cell_table *table;
	struct nvmem_cell *cell;
	int rval = 0, i;

	mutex_lock(&nvmem_cell_mutex);
	list_for_each_entry(table, &nvmem_cell_tables, node) {
		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
			for (i = 0; i < table->ncells; i++) {
				info = &table->cells[i];

				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
				if (!cell) {
					rval = -ENOMEM;
					goto out;
				}

				rval = nvmem_cell_info_to_nvmem_cell(nvmem, info, cell);
				if (rval) {
					kfree(cell);
					goto out;
				}

				nvmem_cell_add(cell);
			}
		}
	}

out:
	mutex_unlock(&nvmem_cell_mutex);
	return rval;
}

static struct nvmem_cell *nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
	struct nvmem_cell *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (strcmp(cell_id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
{
	struct device_node *parent, *child;
	struct device *dev = &nvmem->dev;
	struct nvmem_cell *cell;
	const __be32 *addr;
	int len;

	parent = dev->of_node;

	for_each_child_of_node(parent, child) {
		addr = of_get_property(child, "reg", &len);
		if (!addr) {
			continue;
		}
		if (len < 2 * sizeof(u32)) {
			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
			of_node_put(child);
			return -EINVAL;
		}

		cell = kzalloc(sizeof(*cell), GFP_KERNEL);
		if (!cell) {
			of_node_put(child);
			return -ENOMEM;
		}

		cell->nvmem = nvmem;
		cell->offset = be32_to_cpup(addr++);
		cell->bytes = be32_to_cpup(addr);
		cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);

		addr = of_get_property(child, "bits", &len);
		if (addr && len == (2 * sizeof(u32))) {
			cell->bit_offset = be32_to_cpup(addr++);
			cell->nbits = be32_to_cpup(addr);
		}

		if (cell->nbits) {
			cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset, BITS_PER_BYTE);
		}

		if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
			dev_err(dev, "cell %s unaligned to nvmem stride %d\n", cell->name, nvmem->stride);
			/* Cells already added will be freed later. */
			kfree_const(cell->name);
			kfree(cell);
			of_node_put(child);
			return -EINVAL;
		}

		cell->np = of_node_get(child);
		nvmem_cell_add(cell);
	}

	return 0;
}

/**
 * nvmem_register() - Register a nvmem device for given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev) {
		return ERR_PTR(-EINVAL);
	}

	if (!config->reg_read && !config->reg_write) {
		return ERR_PTR(-EINVAL);
	}

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem) {
		return ERR_PTR(-ENOMEM);
	}

	rval = ida_alloc(&nvmem_ida, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	/* Assign the id before any error path may free it again. */
	nvmem->id = rval;

	if (config->wp_gpio) {
		nvmem->wp_gpio = config->wp_gpio;
	} else {
		nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp", GPIOD_OUT_HIGH);
	}
	if (IS_ERR(nvmem->wp_gpio)) {
		ida_free(&nvmem_ida, nvmem->id);
		rval = PTR_ERR(nvmem->wp_gpio);
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);

	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver) {
		nvmem->owner = config->dev->driver->owner;
	}
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;
	nvmem->root_only = config->root_only;
	nvmem->priv = config->priv;
	nvmem->type = config->type;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	if (!config->no_of_node) {
		nvmem->dev.of_node = config->dev->of_node;
	}

	switch (config->id) {
	case NVMEM_DEVID_NONE:
		dev_set_name(&nvmem->dev, "%s", config->name);
		break;
	case NVMEM_DEVID_AUTO:
		dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
		break;
	default:
		dev_set_name(&nvmem->dev, "%s%d", config->name ?: "nvmem", config->name ? config->id : nvmem->id);
		break;
	}

	nvmem->read_only = device_property_present(config->dev, "read-only") || config->read_only || !nvmem->reg_write;

#ifdef CONFIG_NVMEM_SYSFS
	nvmem->dev.groups = nvmem_dev_groups;
#endif

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_register(&nvmem->dev);
	if (rval) {
		goto err_put_device;
	}

	if (config->compat) {
		rval = nvmem_sysfs_setup_compat(nvmem, config);
		if (rval) {
			goto err_device_del;
		}
	}

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval) {
			goto err_teardown_compat;
		}
	}

	rval = nvmem_add_cells_from_table(nvmem);
	if (rval) {
		goto err_remove_cells;
	}

	rval = nvmem_add_cells_from_of(nvmem);
	if (rval) {
		goto err_remove_cells;
	}

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	return nvmem;

err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
err_teardown_compat:
	if (config->compat) {
		nvmem_sysfs_remove_compat(nvmem, config);
	}
err_device_del:
	device_del(&nvmem->dev);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
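
/*
 * Example: registering a provider. An illustrative sketch only; the
 * "foo" driver, its read callback and the sizes used are hypothetical.
 * A provider fills in a struct nvmem_config and hands it to
 * devm_nvmem_register() (or nvmem_register()) from its probe routine.
 *
 *	static int foo_nvmem_read(void *priv, unsigned int offset,
 *				  void *val, size_t bytes)
 *	{
 *		struct foo *foo = priv;
 *
 *		return foo_read_block(foo, offset, val, bytes);
 *	}
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct nvmem_config config = {
 *			.dev = &pdev->dev,
 *			.name = "foo-nvmem",
 *			.id = NVMEM_DEVID_AUTO,
 *			.owner = THIS_MODULE,
 *			.type = NVMEM_TYPE_OTP,
 *			.read_only = true,
 *			.reg_read = foo_nvmem_read,
 *			.word_size = 1,
 *			.stride = 4,
 *			.size = 256,
 *			.priv = platform_get_drvdata(pdev),
 *		};
 *
 *		return PTR_ERR_OR_ZERO(devm_nvmem_register(&pdev->dev, &config));
 *	}
 */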

static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT) {
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
	}

	nvmem_device_remove_all_cells(nvmem);
	device_unregister(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_release(struct device *dev, void *res)
{
	nvmem_unregister(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev, const struct nvmem_config *config)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr) {
		return ERR_PTR(-ENOMEM);
	}

	nvmem = nvmem_register(config);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);

static int devm_nvmem_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **r = res;

	return *r == data;
}

/**
 * devm_nvmem_unregister() - Unregister previously registered managed nvmem
 * device.
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: Will be negative on error or zero on success.
 */
int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
{
	return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem);
}
EXPORT_SYMBOL(devm_nvmem_unregister);

static struct nvmem_device *nvmem_device_get_ext(void *data, int (*match)(struct device *dev, const void *data))
{
	struct nvmem_device *nvmem = NULL;
	struct device *dev;

	mutex_lock(&nvmem_mutex);
	dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
	if (dev) {
		nvmem = to_nvmem_device(dev);
	}
	mutex_unlock(&nvmem_mutex);
	if (!nvmem) {
		return ERR_PTR(-EPROBE_DEFER);
	}

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev, "could not increase module refcount for cell %s\n", nvmem_dev_name(nvmem));

		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}

static void nvmem_device_put_ext(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	struct nvmem_device *nvmem;
	int index = 0;

	if (id) {
		index = of_property_match_string(np, "nvmem-names", id);
	}

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np) {
		return ERR_PTR(-ENOENT);
	}

	nvmem = nvmem_device_get_ext(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	return nvmem;
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);
		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER) {
			return nvmem;
		}
	}

	return nvmem_device_get_ext((void *)dev_name, device_match_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);

/**
 * nvmem_device_find() - Find nvmem device with matching function
 *
 * @data: Data to pass to match function
 * @match: Callback function to check device
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_find(void *data, int (*match)(struct device *dev, const void *data))
{
	return nvmem_device_get_ext(data, match);
}
EXPORT_SYMBOL_GPL(nvmem_device_find);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem)) {
		return 0;
	}

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release, devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put an already-obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	nvmem_device_put_ext(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success. The nvmem_device will be released automatically once the
 * requesting device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr) {
		return ERR_PTR(-ENOMEM);
	}

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

static struct nvmem_cell *nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev) {
		return ERR_PTR(-EINVAL);
	}

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) && (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = nvmem_device_get_ext((void *)lookup->nvmem_name, device_match_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell = nvmem_find_cell_by_name(nvmem, lookup->cell_name);
			if (!cell) {
				nvmem_device_put_ext(nvmem);
				cell = ERR_PTR(-ENOENT);
			}
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}

#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell *nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_cell *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (np == iter->np) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

/**
 * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 * for the cell at index 0 (the lone cell with no accompanying
 * nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell must be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell *cell;
	int index = 0;

	/* if cell name exists, find index to the name */
	if (id) {
		index = of_property_match_string(np, "nvmem-cell-names", id);
	}

	cell_np = of_parse_phandle(np, "nvmem-cells", index);
	if (!cell_np) {
		return ERR_PTR(-ENOENT);
	}

	nvmem_np = of_get_next_parent(cell_np);
	if (!nvmem_np) {
		return ERR_PTR(-EINVAL);
	}

	nvmem = nvmem_device_get_ext(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem)) {
		return ERR_CAST(nvmem);
	}

	cell = nvmem_find_cell_by_node(nvmem, cell_np);
	if (!cell) {
		nvmem_device_put_ext(nvmem);
		return ERR_PTR(-ENOENT);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif

/**
 * nvmem_cell_get() - Get nvmem cell of device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 * nvmem-cell-names property for DT systems and with the con_id from
 * the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell must be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER) {
			return cell;
		}
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id) {
		return ERR_PTR(-EINVAL);
	}

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get nvmem cell of device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be released
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr) {
		return ERR_PTR(-ENOMEM);
	}

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c)) {
		return 0;
	}

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release, devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->nvmem;

	nvmem_device_put_ext(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
{
	u8 *p, *b;
	int i, extra, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}
	} else {
		/* point to the msb */
		p += cell->bytes - 1;
	}

	/* result fits in less bytes */
	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
	while (--extra >= 0) {
		*p-- = 0;
	}

	/* clear msb bits if any leftover in the last byte */
	if (cell->nbits % BITS_PER_BYTE) {
		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
	}
}

static int nvmem_cell_read_ext(struct nvmem_device *nvmem, struct nvmem_cell *cell, void *buf, size_t *len)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);
	if (rc) {
		return rc;
	}

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits) {
		nvmem_shift_read_buffer_in_place(cell, buf);
	}

	if (len) {
		*len = cell->bytes;
	}

	return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 * can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem) {
		return ERR_PTR(-EINVAL);
	}

	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf) {
		return ERR_PTR(-ENOMEM);
	}

	rc = nvmem_cell_read_ext(nvmem, cell, buf, len);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);
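
/*
 * Example: reading a cell from a consumer driver. Illustrative only;
 * the "mac-address" cell name is hypothetical and must match a cell the
 * provider actually exposes (via DT, a cell table or a lookup entry).
 *
 *	struct nvmem_cell *cell;
 *	size_t len;
 *	u8 *mac;
 *
 *	cell = nvmem_cell_get(dev, "mac-address");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	mac = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(mac))
 *		return PTR_ERR(mac);
 *
 *	// ... use mac[0..len-1], then free the buffer ...
 *	kfree(mac);
 */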

static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell, u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf) {
		return ERR_PTR(-ENOMEM);
	}

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc) {
			goto err;
		}
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the byte if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if it's not end on byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset + cell->bytes - 1, &v, 1);
		if (rc) {
			goto err;
		}
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only || (cell->bit_offset == 0 && len != cell->bytes)) {
		return -EINVAL;
	}

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf)) {
			return PTR_ERR(buf);
		}
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits) {
		kfree(buf);
	}

	if (rc) {
		return rc;
	}

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);
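
/*
 * Example: writing a cell. A sketch under the same assumptions as the
 * read example above ("calibration" is a hypothetical cell name). The
 * buffer length must match the cell size unless the cell carries a
 * bit_offset.
 *
 *	struct nvmem_cell *cell;
 *	u32 calib = 0x12345678;
 *	int ret;
 *
 *	cell = nvmem_cell_get(dev, "calibration");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	ret = nvmem_cell_write(cell, &calib, sizeof(calib));
 *	nvmem_cell_put(cell);
 *	if (ret != sizeof(calib))
 *		return ret < 0 ? ret : -EIO;
 */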

static int nvmem_cell_read_common(struct device *dev, const char *cell_id, void *val, size_t count)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell)) {
		return PTR_ERR(cell);
	}

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != count) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, count);
	kfree(buf);
	nvmem_cell_put(cell);

	return 0;
}

/**
 * nvmem_cell_read_u8() - Read a cell value as a u8
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);

/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);

/**
 * nvmem_cell_read_u64() - Read a cell value as a u64
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);
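
/*
 * Example: the sized helpers above wrap nvmem_cell_get(),
 * nvmem_cell_read() and nvmem_cell_put() into a single call. A sketch;
 * the "chip-revision" cell name is hypothetical, and the cell must be
 * exactly sizeof(u32) bytes or -EINVAL is returned.
 *
 *	u32 rev;
 *	int ret;
 *
 *	ret = nvmem_cell_read_u32(dev, "chip-revision", &rev);
 *	if (ret)
 *		return ret;
 */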

/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem, struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;
	ssize_t len;

	if (!nvmem) {
		return -EINVAL;
	}

	rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
	if (rc) {
		return rc;
	}

	rc = nvmem_cell_read_ext(nvmem, &cell, buf, &len);
	if (rc) {
		return rc;
	}

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem, struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;

	if (!nvmem) {
		return -EINVAL;
	}

	rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
	if (rc) {
		return rc;
	}

	return nvmem_cell_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: length of successful bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem, unsigned int offset, size_t bytes, void *buf)
{
	int rc;

	if (!nvmem) {
		return -EINVAL;
	}

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);
	if (rc) {
		return rc;
	}

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

/**
 * nvmem_device_write() - Write cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem, unsigned int offset, size_t bytes, void *buf)
{
	int rc;

	if (!nvmem) {
		return -EINVAL;
	}

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);
	if (rc) {
		return rc;
	}

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);
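
/*
 * Example: raw device-level access, for consumers that deal in offsets
 * rather than named cells. Illustrative only; the "foo-nvmem" device
 * name and the offset are hypothetical, and offsets must respect the
 * provider's stride.
 *
 *	struct nvmem_device *ndev;
 *	u8 serial[16];
 *	int ret;
 *
 *	ndev = devm_nvmem_device_get(dev, "foo-nvmem");
 *	if (IS_ERR(ndev))
 *		return PTR_ERR(ndev);
 *
 *	ret = nvmem_device_read(ndev, 0x10, sizeof(serial), serial);
 *	if (ret != sizeof(serial))
 *		return ret < 0 ? ret : -EIO;
 */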

/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);

/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++) {
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	}
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);

/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 * entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++) {
		list_del(&entries[i].node);
	}
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);
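
/*
 * Example: describing cells and lookups for non-DT platforms, typically
 * from board code. A sketch; every name here is hypothetical, and the
 * nvmem_name must match the registered provider's device name.
 *
 *	static struct nvmem_cell_info foo_cells[] = {
 *		{
 *			.name = "mac-address",
 *			.offset = 0x40,
 *			.bytes = 6,
 *		},
 *	};
 *
 *	static struct nvmem_cell_table foo_cell_table = {
 *		.nvmem_name = "foo-nvmem0",
 *		.cells = foo_cells,
 *		.ncells = ARRAY_SIZE(foo_cells),
 *	};
 *
 *	static struct nvmem_cell_lookup foo_cell_lookup = {
 *		.nvmem_name = "foo-nvmem0",
 *		.cell_name = "mac-address",
 *		.dev_id = "foo-eth.0",
 *		.con_id = "mac-address",
 *	};
 *
 *	nvmem_add_cell_table(&foo_cell_table);
 *	nvmem_add_cell_lookups(&foo_cell_lookup, 1);
 */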

/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}

#ifdef CONFIG_ROCKCHIP_THUNDER_BOOT
arch_initcall_sync(nvmem_init);
#else
subsys_initcall(nvmem_init);
#endif
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");