// SPDX-License-Identifier: GPL-2.0
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/slab.h>

struct nvmem_device {
	struct module *owner;
	struct device dev;
	int stride;
	int word_size;
	int id;
	struct kref refcnt;
	size_t size;
	bool read_only;
	bool root_only;
	int flags;
	enum nvmem_type type;
	struct bin_attribute eeprom;
	struct device *base_dev;
	struct list_head cells;
	const struct nvmem_keepout *keepout;
	unsigned int nkeepout;
	nvmem_reg_read_t reg_read;
	nvmem_reg_write_t reg_write;
	struct gpio_desc *wp_gpio;
	void *priv;
};

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)

#define FLAG_COMPAT		BIT(0)

struct nvmem_cell {
	const char *name;
	int offset;
	int bytes;
	int bit_offset;
	int nbits;
	struct device_node *np;
	struct nvmem_device *nvmem;
	struct list_head node;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static DEFINE_MUTEX(nvmem_cell_mutex);
static LIST_HEAD(nvmem_cell_tables);

static DEFINE_MUTEX(nvmem_lookup_mutex);
static LIST_HEAD(nvmem_lookup_list);

static BLOCKING_NOTIFIER_HEAD(nvmem_notifier);
static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			    void *val, size_t bytes)
{
	if (nvmem->reg_read)
		return nvmem->reg_read(nvmem->priv, offset, val, bytes);

	return -EINVAL;
}

static int __nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			     void *val, size_t bytes)
{
	int ret;

	if (nvmem->reg_write) {
		gpiod_set_value_cansleep(nvmem->wp_gpio, 0);
		ret = nvmem->reg_write(nvmem->priv, offset, val, bytes);
		gpiod_set_value_cansleep(nvmem->wp_gpio, 1);
		return ret;
	}

	return -EINVAL;
}

static int nvmem_access_with_keepouts(struct nvmem_device *nvmem,
				      unsigned int offset, void *val,
				      size_t bytes, int write)
{
	unsigned int end = offset + bytes;
	unsigned int kend, ksize;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;
	int rc;

	/*
	 * Skip all keepouts before the range being accessed.
	 * Keepouts are sorted.
	 */
	while ((keepout < keepoutend) && (keepout->end <= offset))
		keepout++;

	while ((offset < end) && (keepout < keepoutend)) {
		/* Access the valid portion before the keepout. */
		if (offset < keepout->start) {
			kend = min(end, keepout->start);
			ksize = kend - offset;
			if (write)
				rc = __nvmem_reg_write(nvmem, offset, val, ksize);
			else
				rc = __nvmem_reg_read(nvmem, offset, val, ksize);

			if (rc)
				return rc;

			offset += ksize;
			val += ksize;
		}

		/*
		 * Now we're aligned to the start of this keepout zone. Go
		 * through it.
		 */
		kend = min(end, keepout->end);
		ksize = kend - offset;
		if (!write)
			memset(val, keepout->value, ksize);

		val += ksize;
		offset += ksize;
		keepout++;
	}

	/*
	 * If we ran out of keepouts but there's still stuff to do, send it
	 * down directly.
	 */
	if (offset < end) {
		ksize = end - offset;
		if (write)
			return __nvmem_reg_write(nvmem, offset, val, ksize);
		else
			return __nvmem_reg_read(nvmem, offset, val, ksize);
	}

	return 0;
}
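
/*
 * Illustrative provider sketch (not part of this file): a driver that
 * carves two keepout holes out of its storage. All "foo" names, offsets
 * and sizes below are hypothetical.
 *
 *	static const struct nvmem_keepout foo_keepouts[] = {
 *		{ .start = 0x10, .end = 0x20, .value = 0xff },
 *		{ .start = 0x40, .end = 0x44, .value = 0x00 },
 *	};
 *
 *	foo_config.keepout = foo_keepouts;
 *	foo_config.nkeepout = ARRAY_SIZE(foo_keepouts);
 *
 * A read crossing [0x10, 0x20) then gets 0xff back for the hole, while
 * the surrounding valid ranges are forwarded to the provider's
 * reg_read()/reg_write() callbacks.
 */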

static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
			  void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_read(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, false);
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
			   void *val, size_t bytes)
{
	if (!nvmem->nkeepout)
		return __nvmem_reg_write(nvmem, offset, val, bytes);

	return nvmem_access_with_keepouts(nvmem, offset, val, bytes, true);
}

#ifdef CONFIG_NVMEM_SYSFS
static const char * const nvmem_type_str[] = {
	[NVMEM_TYPE_UNKNOWN] = "Unknown",
	[NVMEM_TYPE_EEPROM] = "EEPROM",
	[NVMEM_TYPE_OTP] = "OTP",
	[NVMEM_TYPE_BATTERY_BACKED] = "Battery backed",
	[NVMEM_TYPE_FRAM] = "FRAM",
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	return sprintf(buf, "%s\n", nvmem_type_str[nvmem->type]);
}

static DEVICE_ATTR_RO(type);

static struct attribute *nvmem_attrs[] = {
	&dev_attr_type.attr,
	NULL,
};

static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *attr, char *buf,
				   loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from reading */
	if (pos >= nvmem->size)
		return 0;

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_read)
		return -EPERM;

	rc = nvmem_reg_read(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
				    struct bin_attribute *attr, char *buf,
				    loff_t pos, size_t count)
{
	struct device *dev;
	struct nvmem_device *nvmem;
	int rc;

	if (attr->private)
		dev = attr->private;
	else
		dev = kobj_to_dev(kobj);
	nvmem = to_nvmem_device(dev);

	/* Stop the user from writing */
	if (pos >= nvmem->size)
		return -EFBIG;

	if (!IS_ALIGNED(pos, nvmem->stride))
		return -EINVAL;

	if (count < nvmem->word_size)
		return -EINVAL;

	if (pos + count > nvmem->size)
		count = nvmem->size - pos;

	count = round_down(count, nvmem->word_size);

	if (!nvmem->reg_write)
		return -EPERM;

	rc = nvmem_reg_write(nvmem, pos, buf, count);

	if (rc)
		return rc;

	return count;
}

static umode_t nvmem_bin_attr_get_umode(struct nvmem_device *nvmem)
{
	umode_t mode = 0400;

	if (!nvmem->root_only)
		mode |= 0044;

	if (!nvmem->read_only)
		mode |= 0200;

	if (!nvmem->reg_write)
		mode &= ~0200;

	if (!nvmem->reg_read)
		mode &= ~0444;

	return mode;
}

static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj,
					 struct bin_attribute *attr, int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	attr->size = nvmem->size;

	return nvmem_bin_attr_get_umode(nvmem);
}

/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
	.attr = {
		.name = "nvmem",
		.mode = 0644,
	},
	.read = bin_attr_nvmem_read,
	.write = bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_attributes[] = {
	&bin_attr_rw_nvmem,
	NULL,
};

static const struct attribute_group nvmem_bin_group = {
	.bin_attrs = nvmem_bin_attributes,
	.attrs = nvmem_attrs,
	.is_bin_visible = nvmem_bin_attr_is_visible,
};

static const struct attribute_group *nvmem_dev_groups[] = {
	&nvmem_bin_group,
	NULL,
};

static struct bin_attribute bin_attr_nvmem_eeprom_compat = {
	.attr = {
		.name = "eeprom",
	},
	.read = bin_attr_nvmem_read,
	.write = bin_attr_nvmem_write,
};

/*
 * nvmem_sysfs_setup_compat() - Create an additional binary entry in
 * the driver's sysfs directory, to be backwards compatible with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	int rval;

	if (!config->compat)
		return 0;

	if (!config->base_dev)
		return -EINVAL;

	if (config->type == NVMEM_TYPE_FRAM)
		bin_attr_nvmem_eeprom_compat.attr.name = "fram";

	nvmem->eeprom = bin_attr_nvmem_eeprom_compat;
	nvmem->eeprom.attr.mode = nvmem_bin_attr_get_umode(nvmem);
	nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
	nvmem->eeprom.private = &nvmem->dev;
	nvmem->base_dev = config->base_dev;

	rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
	if (rval) {
		dev_err(&nvmem->dev,
			"Failed to create eeprom binary file %d\n", rval);
		return rval;
	}

	nvmem->flags |= FLAG_COMPAT;

	return 0;
}

static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
	if (config->compat)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);
}

#else /* CONFIG_NVMEM_SYSFS */

static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem,
				    const struct nvmem_config *config)
{
	return -ENOSYS;
}
static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem,
				      const struct nvmem_config *config)
{
}

#endif /* CONFIG_NVMEM_SYSFS */

static void nvmem_release(struct device *dev)
{
	struct nvmem_device *nvmem = to_nvmem_device(dev);

	ida_free(&nvmem_ida, nvmem->id);
	gpiod_put(nvmem->wp_gpio);
	kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
	.release = nvmem_release,
};

static struct bus_type nvmem_bus_type = {
	.name = "nvmem",
};

static void nvmem_cell_drop(struct nvmem_cell *cell)
{
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_REMOVE, cell);
	mutex_lock(&nvmem_mutex);
	list_del(&cell->node);
	mutex_unlock(&nvmem_mutex);
	of_node_put(cell->np);
	kfree_const(cell->name);
	kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
	struct nvmem_cell *cell, *p;

	list_for_each_entry_safe(cell, p, &nvmem->cells, node)
		nvmem_cell_drop(cell);
}

static void nvmem_cell_add(struct nvmem_cell *cell)
{
	mutex_lock(&nvmem_mutex);
	list_add_tail(&cell->node, &cell->nvmem->cells);
	mutex_unlock(&nvmem_mutex);
	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_CELL_ADD, cell);
}

static int nvmem_cell_info_to_nvmem_cell_nodup(struct nvmem_device *nvmem,
					const struct nvmem_cell_info *info,
					struct nvmem_cell *cell)
{
	cell->nvmem = nvmem;
	cell->offset = info->offset;
	cell->bytes = info->bytes;
	cell->name = info->name;

	cell->bit_offset = info->bit_offset;
	cell->nbits = info->nbits;

	if (cell->nbits)
		cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
					   BITS_PER_BYTE);

	if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
		dev_err(&nvmem->dev,
			"cell %s unaligned to nvmem stride %d\n",
			cell->name ?: "<unknown>", nvmem->stride);
		return -EINVAL;
	}

	return 0;
}

static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
				const struct nvmem_cell_info *info,
				struct nvmem_cell *cell)
{
	int err;

	err = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, cell);
	if (err)
		return err;

	cell->name = kstrdup_const(info->name, GFP_KERNEL);
	if (!cell->name)
		return -ENOMEM;

	return 0;
}

/**
 * nvmem_add_cells() - Add cell information to an nvmem device
 *
 * @nvmem: nvmem device to add cells to.
 * @info: nvmem cell info to add to the device
 * @ncells: number of cells in info
 *
 * Return: 0 or negative error code on failure.
 */
static int nvmem_add_cells(struct nvmem_device *nvmem,
			   const struct nvmem_cell_info *info,
			   int ncells)
{
	struct nvmem_cell **cells;
	int i, rval;

	cells = kcalloc(ncells, sizeof(*cells), GFP_KERNEL);
	if (!cells)
		return -ENOMEM;

	for (i = 0; i < ncells; i++) {
		cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
		if (!cells[i]) {
			rval = -ENOMEM;
			goto err;
		}

		rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
		if (rval) {
			kfree(cells[i]);
			goto err;
		}

		nvmem_cell_add(cells[i]);
	}

	/* remove tmp array */
	kfree(cells);

	return 0;
err:
	while (i--)
		nvmem_cell_drop(cells[i]);

	kfree(cells);

	return rval;
}
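
/*
 * Sketch of the cell descriptions consumed above, as a provider would
 * hand them in via nvmem_config (names and offsets are made up):
 *
 *	static const struct nvmem_cell_info foo_cell_info[] = {
 *		{ .name = "mac-address", .offset = 0x40, .bytes = 6 },
 *		{ .name = "serial", .offset = 0x50, .bytes = 16 },
 *	};
 *
 *	foo_config.cells = foo_cell_info;
 *	foo_config.ncells = ARRAY_SIZE(foo_cell_info);
 */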

/**
 * nvmem_register_notifier() - Register a notifier block for nvmem events.
 *
 * @nb: notifier block to be called on nvmem events.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_register_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_register_notifier);
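
/*
 * Minimal listener sketch (all "foo" identifiers are invented): the
 * callback sees NVMEM_ADD/NVMEM_REMOVE for devices and
 * NVMEM_CELL_ADD/NVMEM_CELL_REMOVE for cells.
 *
 *	static int foo_nvmem_event(struct notifier_block *nb,
 *				   unsigned long event, void *data)
 *	{
 *		if (event == NVMEM_ADD)
 *			pr_debug("nvmem provider added\n");
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_nb = {
 *		.notifier_call = foo_nvmem_event,
 *	};
 *
 *	nvmem_register_notifier(&foo_nb);
 */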

/**
 * nvmem_unregister_notifier() - Unregister a notifier block for nvmem events.
 *
 * @nb: notifier block to be unregistered.
 *
 * Return: 0 on success, negative error number on failure.
 */
int nvmem_unregister_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&nvmem_notifier, nb);
}
EXPORT_SYMBOL_GPL(nvmem_unregister_notifier);

static int nvmem_add_cells_from_table(struct nvmem_device *nvmem)
{
	const struct nvmem_cell_info *info;
	struct nvmem_cell_table *table;
	struct nvmem_cell *cell;
	int rval = 0, i;

	mutex_lock(&nvmem_cell_mutex);
	list_for_each_entry(table, &nvmem_cell_tables, node) {
		if (strcmp(nvmem_dev_name(nvmem), table->nvmem_name) == 0) {
			for (i = 0; i < table->ncells; i++) {
				info = &table->cells[i];

				cell = kzalloc(sizeof(*cell), GFP_KERNEL);
				if (!cell) {
					rval = -ENOMEM;
					goto out;
				}

				rval = nvmem_cell_info_to_nvmem_cell(nvmem,
								     info,
								     cell);
				if (rval) {
					kfree(cell);
					goto out;
				}

				nvmem_cell_add(cell);
			}
		}
	}

out:
	mutex_unlock(&nvmem_cell_mutex);
	return rval;
}

static struct nvmem_cell *
nvmem_find_cell_by_name(struct nvmem_device *nvmem, const char *cell_id)
{
	struct nvmem_cell *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (strcmp(cell_id, iter->name) == 0) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

static int nvmem_validate_keepouts(struct nvmem_device *nvmem)
{
	unsigned int cur = 0;
	const struct nvmem_keepout *keepout = nvmem->keepout;
	const struct nvmem_keepout *keepoutend = keepout + nvmem->nkeepout;

	while (keepout < keepoutend) {
		/* Ensure keepouts are sorted and don't overlap. */
		if (keepout->start < cur) {
			dev_err(&nvmem->dev,
				"Keepout regions aren't sorted or overlap.\n");

			return -ERANGE;
		}

		if (keepout->end < keepout->start) {
			dev_err(&nvmem->dev,
				"Invalid keepout region.\n");

			return -EINVAL;
		}

		/*
		 * Validate keepouts (and holes between) don't violate
		 * word_size constraints.
		 */
		if ((keepout->end - keepout->start < nvmem->word_size) ||
		    ((keepout->start != cur) &&
		     (keepout->start - cur < nvmem->word_size))) {

			dev_err(&nvmem->dev,
				"Keepout regions violate word_size constraints.\n");

			return -ERANGE;
		}

		/* Validate keepouts don't violate stride (alignment). */
		if (!IS_ALIGNED(keepout->start, nvmem->stride) ||
		    !IS_ALIGNED(keepout->end, nvmem->stride)) {

			dev_err(&nvmem->dev,
				"Keepout regions violate stride.\n");

			return -EINVAL;
		}

		cur = keepout->end;
		keepout++;
	}

	return 0;
}

static int nvmem_add_cells_from_of(struct nvmem_device *nvmem)
{
	struct device_node *parent, *child;
	struct device *dev = &nvmem->dev;
	struct nvmem_cell *cell;
	const __be32 *addr;
	int len;

	parent = dev->of_node;

	for_each_child_of_node(parent, child) {
		addr = of_get_property(child, "reg", &len);
		if (!addr)
			continue;
		if (len < 2 * sizeof(u32)) {
			dev_err(dev, "nvmem: invalid reg on %pOF\n", child);
			of_node_put(child);
			return -EINVAL;
		}

		cell = kzalloc(sizeof(*cell), GFP_KERNEL);
		if (!cell) {
			of_node_put(child);
			return -ENOMEM;
		}

		cell->nvmem = nvmem;
		cell->offset = be32_to_cpup(addr++);
		cell->bytes = be32_to_cpup(addr);
		cell->name = kasprintf(GFP_KERNEL, "%pOFn", child);

		addr = of_get_property(child, "bits", &len);
		if (addr && len == (2 * sizeof(u32))) {
			cell->bit_offset = be32_to_cpup(addr++);
			cell->nbits = be32_to_cpup(addr);
		}

		if (cell->nbits)
			cell->bytes = DIV_ROUND_UP(
					cell->nbits + cell->bit_offset,
					BITS_PER_BYTE);

		if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
			dev_err(dev, "cell %s unaligned to nvmem stride %d\n",
				cell->name, nvmem->stride);
			/* Cells already added will be freed later. */
			kfree_const(cell->name);
			kfree(cell);
			of_node_put(child);
			return -EINVAL;
		}

		cell->np = of_node_get(child);
		nvmem_cell_add(cell);
	}

	return 0;
}

/**
 * nvmem_register() - Register an nvmem device for the given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
	struct nvmem_device *nvmem;
	int rval;

	if (!config->dev)
		return ERR_PTR(-EINVAL);

	if (!config->reg_read && !config->reg_write)
		return ERR_PTR(-EINVAL);

	nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
	if (!nvmem)
		return ERR_PTR(-ENOMEM);

	rval = ida_alloc(&nvmem_ida, GFP_KERNEL);
	if (rval < 0) {
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	nvmem->id = rval;

	if (config->wp_gpio)
		nvmem->wp_gpio = config->wp_gpio;
	else if (!config->ignore_wp)
		nvmem->wp_gpio = gpiod_get_optional(config->dev, "wp",
						    GPIOD_OUT_HIGH);
	if (IS_ERR(nvmem->wp_gpio)) {
		ida_free(&nvmem_ida, nvmem->id);
		rval = PTR_ERR(nvmem->wp_gpio);
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	kref_init(&nvmem->refcnt);
	INIT_LIST_HEAD(&nvmem->cells);

	nvmem->owner = config->owner;
	if (!nvmem->owner && config->dev->driver)
		nvmem->owner = config->dev->driver->owner;
	nvmem->stride = config->stride ?: 1;
	nvmem->word_size = config->word_size ?: 1;
	nvmem->size = config->size;
	nvmem->dev.type = &nvmem_provider_type;
	nvmem->dev.bus = &nvmem_bus_type;
	nvmem->dev.parent = config->dev;
	nvmem->root_only = config->root_only;
	nvmem->priv = config->priv;
	nvmem->type = config->type;
	nvmem->reg_read = config->reg_read;
	nvmem->reg_write = config->reg_write;
	nvmem->keepout = config->keepout;
	nvmem->nkeepout = config->nkeepout;
	if (config->of_node)
		nvmem->dev.of_node = config->of_node;
	else if (!config->no_of_node)
		nvmem->dev.of_node = config->dev->of_node;

	switch (config->id) {
	case NVMEM_DEVID_NONE:
		rval = dev_set_name(&nvmem->dev, "%s", config->name);
		break;
	case NVMEM_DEVID_AUTO:
		rval = dev_set_name(&nvmem->dev, "%s%d", config->name, nvmem->id);
		break;
	default:
		rval = dev_set_name(&nvmem->dev, "%s%d",
				    config->name ? : "nvmem",
				    config->name ? config->id : nvmem->id);
		break;
	}

	if (rval) {
		ida_free(&nvmem_ida, nvmem->id);
		kfree(nvmem);
		return ERR_PTR(rval);
	}

	nvmem->read_only = device_property_present(config->dev, "read-only") ||
			   config->read_only || !nvmem->reg_write;

#ifdef CONFIG_NVMEM_SYSFS
	nvmem->dev.groups = nvmem_dev_groups;
#endif

	dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

	rval = device_register(&nvmem->dev);
	if (rval)
		goto err_put_device;

	if (nvmem->nkeepout) {
		rval = nvmem_validate_keepouts(nvmem);
		if (rval)
			goto err_device_del;
	}

	if (config->compat) {
		rval = nvmem_sysfs_setup_compat(nvmem, config);
		if (rval)
			goto err_device_del;
	}

	if (config->cells) {
		rval = nvmem_add_cells(nvmem, config->cells, config->ncells);
		if (rval)
			goto err_remove_cells;
	}

	rval = nvmem_add_cells_from_table(nvmem);
	if (rval)
		goto err_remove_cells;

	rval = nvmem_add_cells_from_of(nvmem);
	if (rval)
		goto err_remove_cells;

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem);

	return nvmem;

err_remove_cells:
	nvmem_device_remove_all_cells(nvmem);
	if (config->compat)
		nvmem_sysfs_remove_compat(nvmem, config);
err_device_del:
	device_del(&nvmem->dev);
err_put_device:
	put_device(&nvmem->dev);

	return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
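
/*
 * Registration sketch, assuming a platform driver with its own
 * foo_reg_read()/foo_reg_write() accessors matching nvmem_reg_read_t /
 * nvmem_reg_write_t (every "foo" identifier is hypothetical):
 *
 *	struct nvmem_config foo_config = {
 *		.dev = &pdev->dev,
 *		.name = "foo-otp",
 *		.id = NVMEM_DEVID_AUTO,
 *		.owner = THIS_MODULE,
 *		.type = NVMEM_TYPE_OTP,
 *		.size = 1024,
 *		.word_size = 1,
 *		.stride = 4,
 *		.priv = foo,
 *		.reg_read = foo_reg_read,
 *		.reg_write = foo_reg_write,
 *	};
 *
 *	nvmem = nvmem_register(&foo_config);
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 */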

static void nvmem_device_release(struct kref *kref)
{
	struct nvmem_device *nvmem;

	nvmem = container_of(kref, struct nvmem_device, refcnt);

	blocking_notifier_call_chain(&nvmem_notifier, NVMEM_REMOVE, nvmem);

	if (nvmem->flags & FLAG_COMPAT)
		device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

	nvmem_device_remove_all_cells(nvmem);
	device_unregister(&nvmem->dev);
}

/**
 * nvmem_unregister() - Unregister previously registered nvmem device
 *
 * @nvmem: Pointer to previously registered nvmem device.
 */
void nvmem_unregister(struct nvmem_device *nvmem)
{
	kref_put(&nvmem->refcnt, nvmem_device_release);
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static void devm_nvmem_release(struct device *dev, void *res)
{
	nvmem_unregister(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_register() - Register a managed nvmem device for the given
 * nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @dev: Device that uses the nvmem device.
 * @config: nvmem device configuration with which nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *devm_nvmem_register(struct device *dev,
					 const struct nvmem_config *config)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_register(config);

	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_register);

static int devm_nvmem_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **r = res;

	return *r == data;
}

/**
 * devm_nvmem_unregister() - Unregister previously registered managed nvmem
 * device.
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: Pointer to previously registered nvmem device.
 *
 * Return: Will be negative on error or zero on success.
 */
int devm_nvmem_unregister(struct device *dev, struct nvmem_device *nvmem)
{
	return devres_release(dev, devm_nvmem_release, devm_nvmem_match, nvmem);
}
EXPORT_SYMBOL(devm_nvmem_unregister);

static struct nvmem_device *__nvmem_device_get(void *data,
			int (*match)(struct device *dev, const void *data))
{
	struct nvmem_device *nvmem = NULL;
	struct device *dev;

	mutex_lock(&nvmem_mutex);
	dev = bus_find_device(&nvmem_bus_type, NULL, data, match);
	if (dev)
		nvmem = to_nvmem_device(dev);
	mutex_unlock(&nvmem_mutex);
	if (!nvmem)
		return ERR_PTR(-EPROBE_DEFER);

	if (!try_module_get(nvmem->owner)) {
		dev_err(&nvmem->dev,
			"could not increase module refcount for cell %s\n",
			nvmem_dev_name(nvmem));

		put_device(&nvmem->dev);
		return ERR_PTR(-EINVAL);
	}

	kref_get(&nvmem->refcnt);

	return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
	put_device(&nvmem->dev);
	module_put(nvmem->owner);
	kref_put(&nvmem->refcnt, nvmem_device_release);
}

#if IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
	struct device_node *nvmem_np;
	struct nvmem_device *nvmem;
	int index = 0;

	if (id)
		index = of_property_match_string(np, "nvmem-names", id);

	nvmem_np = of_parse_phandle(np, "nvmem", index);
	if (!nvmem_np)
		return ERR_PTR(-ENOENT);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	return nvmem;
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
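
/*
 * Device tree sketch of the binding this resolves (labels invented):
 *
 *	consumer {
 *		nvmem = <&foo_nvmem>;
 *		nvmem-names = "foo";
 *	};
 *
 * of_nvmem_device_get(np, "foo") matches "foo" in nvmem-names and
 * follows the phandle at the same index in the nvmem property.
 */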
#endif

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
	if (dev->of_node) { /* try dt first */
		struct nvmem_device *nvmem;

		nvmem = of_nvmem_device_get(dev->of_node, dev_name);

		if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
			return nvmem;
	}

	return __nvmem_device_get((void *)dev_name, device_match_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);

/**
 * nvmem_device_find() - Find nvmem device with matching function
 *
 * @data: Data to pass to match function
 * @match: Callback function to check device
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_find(void *data,
			int (*match)(struct device *dev, const void *data))
{
	return __nvmem_device_get(data, match);
}
EXPORT_SYMBOL_GPL(nvmem_device_find);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
	struct nvmem_device **nvmem = res;

	if (WARN_ON(!nvmem || !*nvmem))
		return 0;

	return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
	nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to nvmem device allocated by devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_device_release,
			     devm_nvmem_device_match, nvmem);

	WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put an already-obtained nvmem device
 *
 * @nvmem: pointer to nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success. The nvmem_device will be released automatically once the
 * device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
	struct nvmem_device **ptr, *nvmem;

	ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	nvmem = nvmem_device_get(dev, id);
	if (!IS_ERR(nvmem)) {
		*ptr = nvmem;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);

static struct nvmem_cell *
nvmem_cell_get_from_lookup(struct device *dev, const char *con_id)
{
	struct nvmem_cell *cell = ERR_PTR(-ENOENT);
	struct nvmem_cell_lookup *lookup;
	struct nvmem_device *nvmem;
	const char *dev_id;

	if (!dev)
		return ERR_PTR(-EINVAL);

	dev_id = dev_name(dev);

	mutex_lock(&nvmem_lookup_mutex);

	list_for_each_entry(lookup, &nvmem_lookup_list, node) {
		if ((strcmp(lookup->dev_id, dev_id) == 0) &&
		    (strcmp(lookup->con_id, con_id) == 0)) {
			/* This is the right entry. */
			nvmem = __nvmem_device_get((void *)lookup->nvmem_name,
						   device_match_name);
			if (IS_ERR(nvmem)) {
				/* Provider may not be registered yet. */
				cell = ERR_CAST(nvmem);
				break;
			}

			cell = nvmem_find_cell_by_name(nvmem,
						       lookup->cell_name);
			if (!cell) {
				__nvmem_device_put(nvmem);
				cell = ERR_PTR(-ENOENT);
			}
			break;
		}
	}

	mutex_unlock(&nvmem_lookup_mutex);
	return cell;
}

#if IS_ENABLED(CONFIG_OF)
static struct nvmem_cell *
nvmem_find_cell_by_node(struct nvmem_device *nvmem, struct device_node *np)
{
	struct nvmem_cell *iter, *cell = NULL;

	mutex_lock(&nvmem_mutex);
	list_for_each_entry(iter, &nvmem->cells, node) {
		if (np == iter->np) {
			cell = iter;
			break;
		}
	}
	mutex_unlock(&nvmem_mutex);

	return cell;
}

/**
 * of_nvmem_cell_get() - Get an nvmem cell from a given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @id: nvmem cell name from nvmem-cell-names property, or NULL
 *      for the cell at index 0 (the lone cell with no accompanying
 *      nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell should be released
 * with nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id)
{
	struct device_node *cell_np, *nvmem_np;
	struct nvmem_device *nvmem;
	struct nvmem_cell *cell;
	int index = 0;

	/* if cell name exists, find index to the name */
	if (id)
		index = of_property_match_string(np, "nvmem-cell-names", id);

	cell_np = of_parse_phandle(np, "nvmem-cells", index);
	if (!cell_np)
		return ERR_PTR(-ENOENT);

	nvmem_np = of_get_next_parent(cell_np);
	if (!nvmem_np)
		return ERR_PTR(-EINVAL);

	nvmem = __nvmem_device_get(nvmem_np, device_match_of_node);
	of_node_put(nvmem_np);
	if (IS_ERR(nvmem))
		return ERR_CAST(nvmem);

	cell = nvmem_find_cell_by_node(nvmem, cell_np);
	if (!cell) {
		__nvmem_device_put(nvmem);
		return ERR_PTR(-ENOENT);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
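
/*
 * Device tree sketch of the lookup performed above (all node and label
 * names are illustrative):
 *
 *	eeprom@52 {
 *		mac_addr: mac-address@40 {
 *			reg = <0x40 0x6>;
 *		};
 *	};
 *
 *	ethernet {
 *		nvmem-cells = <&mac_addr>;
 *		nvmem-cell-names = "mac-address";
 *	};
 *
 * of_nvmem_cell_get(np, "mac-address") follows the phandle to the cell
 * node and derives the provider from the cell node's parent.
 */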
#endif

/**
 * nvmem_cell_get() - Get an nvmem cell of a device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name to get (this corresponds with the name from the
 *      nvmem-cell-names property for DT systems and with the con_id from
 *      the lookup entry for non-DT systems).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell should be released
 * with nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell *cell;

	if (dev->of_node) { /* try dt first */
		cell = of_nvmem_cell_get(dev->of_node, id);
		if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
			return cell;
	}

	/* NULL cell id only allowed for device tree; invalid otherwise */
	if (!id)
		return ERR_PTR(-EINVAL);

	return nvmem_cell_get_from_lookup(dev, id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
	nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get an nvmem cell of a device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell. The nvmem_cell will be released
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
	struct nvmem_cell **ptr, *cell;

	ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	cell = nvmem_cell_get(dev, id);
	if (!IS_ERR(cell)) {
		*ptr = cell;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
	struct nvmem_cell **c = res;

	if (WARN_ON(!c || !*c))
		return 0;

	return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release a previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
	int ret;

	ret = devres_release(dev, devm_nvmem_cell_release,
			     devm_nvmem_cell_match, cell);

	WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release a previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
	struct nvmem_device *nvmem = cell->nvmem;

	__nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

static void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell, void *buf)
{
	u8 *p, *b;
	int i, extra, bit_offset = cell->bit_offset;

	p = b = buf;
	if (bit_offset) {
		/* First shift */
		*b++ >>= bit_offset;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get bits from next byte and shift them towards msb */
			*p |= *b << (BITS_PER_BYTE - bit_offset);

			p = b;
			*b++ >>= bit_offset;
		}
	} else {
		/* point to the msb */
		p += cell->bytes - 1;
	}

	/* result fits in fewer bytes */
	extra = cell->bytes - DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE);
	while (--extra >= 0)
		*p-- = 0;

	/* clear msb bits if any leftover in the last byte */
	if (cell->nbits % BITS_PER_BYTE)
		*p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}
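
/*
 * Worked example of the in-place shift above (values chosen purely for
 * illustration): bit_offset = 2 and nbits = 10 give cell->bytes = 2.
 * buf[0] is shifted right by two, the low two bits of buf[1] are pulled
 * into buf[0]'s top bits, buf[1] is shifted right by two, and the final
 * GENMASK() keeps only the low two bits of buf[1], leaving the 10-bit
 * value right-aligned in the little-endian two-byte buffer.
 */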

static int __nvmem_cell_read(struct nvmem_device *nvmem,
			     struct nvmem_cell *cell,
			     void *buf, size_t *len)
{
	int rc;

	rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);

	if (rc)
		return rc;

	/* shift bits in-place */
	if (cell->bit_offset || cell->nbits)
		nvmem_shift_read_buffer_in_place(cell, buf);

	if (len)
		*len = cell->bytes;

	return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *	 can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	u8 *buf;
	int rc;

	if (!nvmem)
		return ERR_PTR(-EINVAL);

	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	rc = __nvmem_cell_read(nvmem, cell, buf, len);
	if (rc) {
		kfree(buf);
		return ERR_PTR(rc);
	}

	return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);
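
/*
 * Consumer sketch (the cell name is hypothetical):
 *
 *	struct nvmem_cell *cell;
 *	size_t len;
 *	u8 *mac;
 *
 *	cell = nvmem_cell_get(dev, "mac-address");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	mac = nvmem_cell_read(cell, &len);
 *	nvmem_cell_put(cell);
 *	if (IS_ERR(mac))
 *		return PTR_ERR(mac);
 *
 *	// ... use the len bytes at mac, then ...
 *	kfree(mac);
 */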

static void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
					     u8 *_buf, int len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int i, rc, nbits, bit_offset = cell->bit_offset;
	u8 v, *p, *buf, *b, pbyte, pbits;

	nbits = cell->nbits;
	buf = kzalloc(cell->bytes, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	memcpy(buf, _buf, len);
	p = b = buf;

	if (bit_offset) {
		pbyte = *b;
		*b <<= bit_offset;

		/* setup the first byte with lsb bits from nvmem */
		rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
		if (rc)
			goto err;
		*b++ |= GENMASK(bit_offset - 1, 0) & v;

		/* setup rest of the bytes if any */
		for (i = 1; i < cell->bytes; i++) {
			/* Get last byte bits and shift them towards lsb */
			pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
			pbyte = *b;
			p = b;
			*b <<= bit_offset;
			*b++ |= pbits;
		}
	}

	/* if the write does not end on a byte boundary */
	if ((nbits + bit_offset) % BITS_PER_BYTE) {
		/* setup the last byte with msb bits from nvmem */
		rc = nvmem_reg_read(nvmem,
				    cell->offset + cell->bytes - 1, &v, 1);
		if (rc)
			goto err;
		*p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
	}

	return buf;
err:
	kfree(buf);
	return ERR_PTR(rc);
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: length of bytes written or negative on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
	struct nvmem_device *nvmem = cell->nvmem;
	int rc;

	if (!nvmem || nvmem->read_only ||
	    (cell->bit_offset == 0 && len != cell->bytes))
		return -EINVAL;

	if (cell->bit_offset || cell->nbits) {
		buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
		if (IS_ERR(buf))
			return PTR_ERR(buf);
	}

	rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

	/* free the tmp buffer */
	if (cell->bit_offset || cell->nbits)
		kfree(buf);

	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);

static int nvmem_cell_read_common(struct device *dev, const char *cell_id,
				  void *val, size_t count)
{
	struct nvmem_cell *cell;
	void *buf;
	size_t len;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return PTR_ERR(cell);

	buf = nvmem_cell_read(cell, &len);
	if (IS_ERR(buf)) {
		nvmem_cell_put(cell);
		return PTR_ERR(buf);
	}
	if (len != count) {
		kfree(buf);
		nvmem_cell_put(cell);
		return -EINVAL;
	}
	memcpy(val, buf, count);
	kfree(buf);
	nvmem_cell_put(cell);

	return 0;
}

/**
 * nvmem_cell_read_u8() - Read a cell value as a u8
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u8(struct device *dev, const char *cell_id, u8 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u8);

/**
 * nvmem_cell_read_u16() - Read a cell value as a u16
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u16(struct device *dev, const char *cell_id, u16 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u16);

/**
 * nvmem_cell_read_u32() - Read a cell value as a u32
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u32(struct device *dev, const char *cell_id, u32 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u32);

/**
 * nvmem_cell_read_u64() - Read a cell value as a u64
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_u64(struct device *dev, const char *cell_id, u64 *val)
{
	return nvmem_cell_read_common(dev, cell_id, val, sizeof(*val));
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_u64);

static const void *nvmem_cell_read_variable_common(struct device *dev,
						   const char *cell_id,
						   size_t max_len, size_t *len)
{
	struct nvmem_cell *cell;
	int nbits;
	void *buf;

	cell = nvmem_cell_get(dev, cell_id);
	if (IS_ERR(cell))
		return cell;

	nbits = cell->nbits;
	buf = nvmem_cell_read(cell, len);
	nvmem_cell_put(cell);
	if (IS_ERR(buf))
		return buf;

	/*
	 * If nbits is set then nvmem_cell_read() can significantly exaggerate
	 * the length of the real data. Throw away the extra junk.
	 */
	if (nbits)
		*len = DIV_ROUND_UP(nbits, 8);

	if (*len > max_len) {
		kfree(buf);
		return ERR_PTR(-ERANGE);
	}

	return buf;
}

/**
 * nvmem_cell_read_variable_le_u32() - Read up to 32-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u32(struct device *dev, const char *cell_id,
				    u32 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u32);

/**
 * nvmem_cell_read_variable_le_u64() - Read up to 64-bits of data as a little endian number.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: Name of nvmem cell to read.
 * @val: pointer to output value.
 *
 * Return: 0 on success or negative errno.
 */
int nvmem_cell_read_variable_le_u64(struct device *dev, const char *cell_id,
				    u64 *val)
{
	size_t len;
	const u8 *buf;
	int i;

	buf = nvmem_cell_read_variable_common(dev, cell_id, sizeof(*val), &len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* Copy w/ implicit endian conversion */
	*val = 0;
	for (i = 0; i < len; i++)
		*val |= (uint64_t)buf[i] << (8 * i);

	kfree(buf);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read_variable_le_u64);

/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: number of bytes read on success and negative
 * error code on error.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
			       struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;
	ssize_t len;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
	if (rc)
		return rc;

	return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write a cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
			    struct nvmem_cell_info *info, void *buf)
{
	struct nvmem_cell cell;
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_cell_info_to_nvmem_cell_nodup(nvmem, info, &cell);
	if (rc)
		return rc;

	return nvmem_cell_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: number of bytes read on success and negative
 * error code on error.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
		      unsigned int offset,
		      size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_read(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: length of bytes written or negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
		       unsigned int offset,
		       size_t bytes, void *buf)
{
	int rc;

	if (!nvmem)
		return -EINVAL;

	rc = nvmem_reg_write(nvmem, offset, buf, bytes);

	if (rc)
		return rc;

	return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);

/**
 * nvmem_add_cell_table() - register a table of cell info entries
 *
 * @table: table of cell info entries
 */
void nvmem_add_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_add_tail(&table->node, &nvmem_cell_tables);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_table);
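
/*
 * Board-file sketch (names and offsets invented): describing cells for
 * a provider that will register later under the "foo-eeprom0" name.
 *
 *	static struct nvmem_cell_info foo_cells[] = {
 *		{ .name = "calibration", .offset = 0x100, .bytes = 8 },
 *	};
 *
 *	static struct nvmem_cell_table foo_cell_table = {
 *		.nvmem_name = "foo-eeprom0",
 *		.cells = foo_cells,
 *		.ncells = ARRAY_SIZE(foo_cells),
 *	};
 *
 *	nvmem_add_cell_table(&foo_cell_table);
 */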

/**
 * nvmem_del_cell_table() - remove a previously registered cell info table
 *
 * @table: table of cell info entries
 */
void nvmem_del_cell_table(struct nvmem_cell_table *table)
{
	mutex_lock(&nvmem_cell_mutex);
	list_del(&table->node);
	mutex_unlock(&nvmem_cell_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_table);

/**
 * nvmem_add_cell_lookups() - register a list of cell lookup entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_add_tail(&entries[i].node, &nvmem_lookup_list);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_add_cell_lookups);
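
/*
 * Lookup sketch for non-DT consumers (identifiers invented): con_id is
 * what the consumer later passes as the id to nvmem_cell_get().
 *
 *	static struct nvmem_cell_lookup foo_lookup = {
 *		.nvmem_name = "foo-eeprom0",
 *		.cell_name = "calibration",
 *		.dev_id = "foo-codec.0",
 *		.con_id = "cal-data",
 *	};
 *
 *	nvmem_add_cell_lookups(&foo_lookup, 1);
 */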

/**
 * nvmem_del_cell_lookups() - remove a list of previously added cell lookup
 * entries
 *
 * @entries: array of cell lookup entries
 * @nentries: number of cell lookup entries in the array
 */
void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries)
{
	int i;

	mutex_lock(&nvmem_lookup_mutex);
	for (i = 0; i < nentries; i++)
		list_del(&entries[i].node);
	mutex_unlock(&nvmem_lookup_mutex);
}
EXPORT_SYMBOL_GPL(nvmem_del_cell_lookups);

/**
 * nvmem_dev_name() - Get the name of a given nvmem device.
 *
 * @nvmem: nvmem device.
 *
 * Return: name of the nvmem device.
 */
const char *nvmem_dev_name(struct nvmem_device *nvmem)
{
	return dev_name(&nvmem->dev);
}
EXPORT_SYMBOL_GPL(nvmem_dev_name);

static int __init nvmem_init(void)
{
	return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
	bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");