1 // SPDX-License-Identifier: GPL-2.0
2 //
3 // Register map access API
4 //
5 // Copyright 2011 Wolfson Microelectronics plc
6 //
7 // Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
8
9 #include <linux/device.h>
10 #include <linux/slab.h>
11 #include <linux/export.h>
12 #include <linux/mutex.h>
13 #include <linux/err.h>
14 #include <linux/property.h>
15 #include <linux/rbtree.h>
16 #include <linux/sched.h>
17 #include <linux/delay.h>
18 #include <linux/log2.h>
19 #include <linux/hwspinlock.h>
20 #include <linux/unaligned.h>
21
22 #define CREATE_TRACE_POINTS
23 #include "trace.h"
24
25 #include "internal.h"
26
27 /*
28 * Sometimes for failures during very early init the trace
29 * infrastructure isn't available early enough to be used. For this
30 * sort of problem defining LOG_DEVICE will add printks for basic
31 * register I/O on a specific device.
32 */
33 #undef LOG_DEVICE
34
35 #ifdef LOG_DEVICE
regmap_should_log(struct regmap * map)36 static inline bool regmap_should_log(struct regmap *map)
37 {
38 return (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0);
39 }
40 #else
regmap_should_log(struct regmap * map)41 static inline bool regmap_should_log(struct regmap *map) { return false; }
42 #endif
43
44
45 static int _regmap_update_bits(struct regmap *map, unsigned int reg,
46 unsigned int mask, unsigned int val,
47 bool *change, bool force_write);
48
49 static int _regmap_bus_reg_read(void *context, unsigned int reg,
50 unsigned int *val);
51 static int _regmap_bus_read(void *context, unsigned int reg,
52 unsigned int *val);
53 static int _regmap_bus_formatted_write(void *context, unsigned int reg,
54 unsigned int val);
55 static int _regmap_bus_reg_write(void *context, unsigned int reg,
56 unsigned int val);
57 static int _regmap_bus_raw_write(void *context, unsigned int reg,
58 unsigned int val);
59
regmap_reg_in_ranges(unsigned int reg,const struct regmap_range * ranges,unsigned int nranges)60 bool regmap_reg_in_ranges(unsigned int reg,
61 const struct regmap_range *ranges,
62 unsigned int nranges)
63 {
64 const struct regmap_range *r;
65 int i;
66
67 for (i = 0, r = ranges; i < nranges; i++, r++)
68 if (regmap_reg_in_range(reg, r))
69 return true;
70 return false;
71 }
72 EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);
73
regmap_check_range_table(struct regmap * map,unsigned int reg,const struct regmap_access_table * table)74 bool regmap_check_range_table(struct regmap *map, unsigned int reg,
75 const struct regmap_access_table *table)
76 {
77 /* Check "no ranges" first */
78 if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
79 return false;
80
81 /* In case zero "yes ranges" are supplied, any reg is OK */
82 if (!table->n_yes_ranges)
83 return true;
84
85 return regmap_reg_in_ranges(reg, table->yes_ranges,
86 table->n_yes_ranges);
87 }
88 EXPORT_SYMBOL_GPL(regmap_check_range_table);
89
regmap_writeable(struct regmap * map,unsigned int reg)90 bool regmap_writeable(struct regmap *map, unsigned int reg)
91 {
92 if (map->max_register_is_set && reg > map->max_register)
93 return false;
94
95 if (map->writeable_reg)
96 return map->writeable_reg(map->dev, reg);
97
98 if (map->wr_table)
99 return regmap_check_range_table(map, reg, map->wr_table);
100
101 return true;
102 }
103
regmap_cached(struct regmap * map,unsigned int reg)104 bool regmap_cached(struct regmap *map, unsigned int reg)
105 {
106 int ret;
107 unsigned int val;
108
109 if (map->cache_type == REGCACHE_NONE)
110 return false;
111
112 if (!map->cache_ops)
113 return false;
114
115 if (map->max_register_is_set && reg > map->max_register)
116 return false;
117
118 map->lock(map->lock_arg);
119 ret = regcache_read(map, reg, &val);
120 map->unlock(map->lock_arg);
121 if (ret)
122 return false;
123
124 return true;
125 }
126
regmap_readable(struct regmap * map,unsigned int reg)127 bool regmap_readable(struct regmap *map, unsigned int reg)
128 {
129 if (!map->reg_read)
130 return false;
131
132 if (map->max_register_is_set && reg > map->max_register)
133 return false;
134
135 if (map->format.format_write)
136 return false;
137
138 if (map->readable_reg)
139 return map->readable_reg(map->dev, reg);
140
141 if (map->rd_table)
142 return regmap_check_range_table(map, reg, map->rd_table);
143
144 return true;
145 }
146
regmap_volatile(struct regmap * map,unsigned int reg)147 bool regmap_volatile(struct regmap *map, unsigned int reg)
148 {
149 if (!map->format.format_write && !regmap_readable(map, reg))
150 return false;
151
152 if (map->volatile_reg)
153 return map->volatile_reg(map->dev, reg);
154
155 if (map->volatile_table)
156 return regmap_check_range_table(map, reg, map->volatile_table);
157
158 if (map->cache_ops)
159 return false;
160 else
161 return true;
162 }
163
regmap_precious(struct regmap * map,unsigned int reg)164 bool regmap_precious(struct regmap *map, unsigned int reg)
165 {
166 if (!regmap_readable(map, reg))
167 return false;
168
169 if (map->precious_reg)
170 return map->precious_reg(map->dev, reg);
171
172 if (map->precious_table)
173 return regmap_check_range_table(map, reg, map->precious_table);
174
175 return false;
176 }
177
regmap_writeable_noinc(struct regmap * map,unsigned int reg)178 bool regmap_writeable_noinc(struct regmap *map, unsigned int reg)
179 {
180 if (map->writeable_noinc_reg)
181 return map->writeable_noinc_reg(map->dev, reg);
182
183 if (map->wr_noinc_table)
184 return regmap_check_range_table(map, reg, map->wr_noinc_table);
185
186 return true;
187 }
188
regmap_readable_noinc(struct regmap * map,unsigned int reg)189 bool regmap_readable_noinc(struct regmap *map, unsigned int reg)
190 {
191 if (map->readable_noinc_reg)
192 return map->readable_noinc_reg(map->dev, reg);
193
194 if (map->rd_noinc_table)
195 return regmap_check_range_table(map, reg, map->rd_noinc_table);
196
197 return true;
198 }
199
regmap_volatile_range(struct regmap * map,unsigned int reg,size_t num)200 static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
201 size_t num)
202 {
203 unsigned int i;
204
205 for (i = 0; i < num; i++)
206 if (!regmap_volatile(map, reg + regmap_get_offset(map, i)))
207 return false;
208
209 return true;
210 }
211
regmap_format_12_20_write(struct regmap * map,unsigned int reg,unsigned int val)212 static void regmap_format_12_20_write(struct regmap *map,
213 unsigned int reg, unsigned int val)
214 {
215 u8 *out = map->work_buf;
216
217 out[0] = reg >> 4;
218 out[1] = (reg << 4) | (val >> 16);
219 out[2] = val >> 8;
220 out[3] = val;
221 }
222
223
regmap_format_2_6_write(struct regmap * map,unsigned int reg,unsigned int val)224 static void regmap_format_2_6_write(struct regmap *map,
225 unsigned int reg, unsigned int val)
226 {
227 u8 *out = map->work_buf;
228
229 *out = (reg << 6) | val;
230 }
231
regmap_format_4_12_write(struct regmap * map,unsigned int reg,unsigned int val)232 static void regmap_format_4_12_write(struct regmap *map,
233 unsigned int reg, unsigned int val)
234 {
235 __be16 *out = map->work_buf;
236 *out = cpu_to_be16((reg << 12) | val);
237 }
238
regmap_format_7_9_write(struct regmap * map,unsigned int reg,unsigned int val)239 static void regmap_format_7_9_write(struct regmap *map,
240 unsigned int reg, unsigned int val)
241 {
242 __be16 *out = map->work_buf;
243 *out = cpu_to_be16((reg << 9) | val);
244 }
245
regmap_format_7_17_write(struct regmap * map,unsigned int reg,unsigned int val)246 static void regmap_format_7_17_write(struct regmap *map,
247 unsigned int reg, unsigned int val)
248 {
249 u8 *out = map->work_buf;
250
251 out[2] = val;
252 out[1] = val >> 8;
253 out[0] = (val >> 16) | (reg << 1);
254 }
255
regmap_format_10_14_write(struct regmap * map,unsigned int reg,unsigned int val)256 static void regmap_format_10_14_write(struct regmap *map,
257 unsigned int reg, unsigned int val)
258 {
259 u8 *out = map->work_buf;
260
261 out[2] = val;
262 out[1] = (val >> 8) | (reg << 6);
263 out[0] = reg >> 2;
264 }
265
regmap_format_8(void * buf,unsigned int val,unsigned int shift)266 static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
267 {
268 u8 *b = buf;
269
270 b[0] = val << shift;
271 }
272
regmap_format_16_be(void * buf,unsigned int val,unsigned int shift)273 static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
274 {
275 put_unaligned_be16(val << shift, buf);
276 }
277
regmap_format_16_le(void * buf,unsigned int val,unsigned int shift)278 static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
279 {
280 put_unaligned_le16(val << shift, buf);
281 }
282
regmap_format_16_native(void * buf,unsigned int val,unsigned int shift)283 static void regmap_format_16_native(void *buf, unsigned int val,
284 unsigned int shift)
285 {
286 u16 v = val << shift;
287
288 memcpy(buf, &v, sizeof(v));
289 }
290
regmap_format_24_be(void * buf,unsigned int val,unsigned int shift)291 static void regmap_format_24_be(void *buf, unsigned int val, unsigned int shift)
292 {
293 put_unaligned_be24(val << shift, buf);
294 }
295
regmap_format_32_be(void * buf,unsigned int val,unsigned int shift)296 static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
297 {
298 put_unaligned_be32(val << shift, buf);
299 }
300
regmap_format_32_le(void * buf,unsigned int val,unsigned int shift)301 static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
302 {
303 put_unaligned_le32(val << shift, buf);
304 }
305
regmap_format_32_native(void * buf,unsigned int val,unsigned int shift)306 static void regmap_format_32_native(void *buf, unsigned int val,
307 unsigned int shift)
308 {
309 u32 v = val << shift;
310
311 memcpy(buf, &v, sizeof(v));
312 }
313
regmap_parse_inplace_noop(void * buf)314 static void regmap_parse_inplace_noop(void *buf)
315 {
316 }
317
regmap_parse_8(const void * buf)318 static unsigned int regmap_parse_8(const void *buf)
319 {
320 const u8 *b = buf;
321
322 return b[0];
323 }
324
regmap_parse_16_be(const void * buf)325 static unsigned int regmap_parse_16_be(const void *buf)
326 {
327 return get_unaligned_be16(buf);
328 }
329
regmap_parse_16_le(const void * buf)330 static unsigned int regmap_parse_16_le(const void *buf)
331 {
332 return get_unaligned_le16(buf);
333 }
334
regmap_parse_16_be_inplace(void * buf)335 static void regmap_parse_16_be_inplace(void *buf)
336 {
337 u16 v = get_unaligned_be16(buf);
338
339 memcpy(buf, &v, sizeof(v));
340 }
341
regmap_parse_16_le_inplace(void * buf)342 static void regmap_parse_16_le_inplace(void *buf)
343 {
344 u16 v = get_unaligned_le16(buf);
345
346 memcpy(buf, &v, sizeof(v));
347 }
348
regmap_parse_16_native(const void * buf)349 static unsigned int regmap_parse_16_native(const void *buf)
350 {
351 u16 v;
352
353 memcpy(&v, buf, sizeof(v));
354 return v;
355 }
356
regmap_parse_24_be(const void * buf)357 static unsigned int regmap_parse_24_be(const void *buf)
358 {
359 return get_unaligned_be24(buf);
360 }
361
regmap_parse_32_be(const void * buf)362 static unsigned int regmap_parse_32_be(const void *buf)
363 {
364 return get_unaligned_be32(buf);
365 }
366
regmap_parse_32_le(const void * buf)367 static unsigned int regmap_parse_32_le(const void *buf)
368 {
369 return get_unaligned_le32(buf);
370 }
371
regmap_parse_32_be_inplace(void * buf)372 static void regmap_parse_32_be_inplace(void *buf)
373 {
374 u32 v = get_unaligned_be32(buf);
375
376 memcpy(buf, &v, sizeof(v));
377 }
378
regmap_parse_32_le_inplace(void * buf)379 static void regmap_parse_32_le_inplace(void *buf)
380 {
381 u32 v = get_unaligned_le32(buf);
382
383 memcpy(buf, &v, sizeof(v));
384 }
385
regmap_parse_32_native(const void * buf)386 static unsigned int regmap_parse_32_native(const void *buf)
387 {
388 u32 v;
389
390 memcpy(&v, buf, sizeof(v));
391 return v;
392 }
393
regmap_lock_hwlock(void * __map)394 static void regmap_lock_hwlock(void *__map)
395 {
396 struct regmap *map = __map;
397
398 hwspin_lock_timeout(map->hwlock, UINT_MAX);
399 }
400
regmap_lock_hwlock_irq(void * __map)401 static void regmap_lock_hwlock_irq(void *__map)
402 {
403 struct regmap *map = __map;
404
405 hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
406 }
407
regmap_lock_hwlock_irqsave(void * __map)408 static void regmap_lock_hwlock_irqsave(void *__map)
409 {
410 struct regmap *map = __map;
411
412 hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
413 &map->spinlock_flags);
414 }
415
regmap_unlock_hwlock(void * __map)416 static void regmap_unlock_hwlock(void *__map)
417 {
418 struct regmap *map = __map;
419
420 hwspin_unlock(map->hwlock);
421 }
422
regmap_unlock_hwlock_irq(void * __map)423 static void regmap_unlock_hwlock_irq(void *__map)
424 {
425 struct regmap *map = __map;
426
427 hwspin_unlock_irq(map->hwlock);
428 }
429
regmap_unlock_hwlock_irqrestore(void * __map)430 static void regmap_unlock_hwlock_irqrestore(void *__map)
431 {
432 struct regmap *map = __map;
433
434 hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
435 }
436
regmap_lock_unlock_none(void * __map)437 static void regmap_lock_unlock_none(void *__map)
438 {
439
440 }
441
regmap_lock_mutex(void * __map)442 static void regmap_lock_mutex(void *__map)
443 {
444 struct regmap *map = __map;
445 mutex_lock(&map->mutex);
446 }
447
regmap_unlock_mutex(void * __map)448 static void regmap_unlock_mutex(void *__map)
449 {
450 struct regmap *map = __map;
451 mutex_unlock(&map->mutex);
452 }
453
regmap_lock_spinlock(void * __map)454 static void regmap_lock_spinlock(void *__map)
455 __acquires(&map->spinlock)
456 {
457 struct regmap *map = __map;
458 unsigned long flags;
459
460 spin_lock_irqsave(&map->spinlock, flags);
461 map->spinlock_flags = flags;
462 }
463
regmap_unlock_spinlock(void * __map)464 static void regmap_unlock_spinlock(void *__map)
465 __releases(&map->spinlock)
466 {
467 struct regmap *map = __map;
468 spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
469 }
470
regmap_lock_raw_spinlock(void * __map)471 static void regmap_lock_raw_spinlock(void *__map)
472 __acquires(&map->raw_spinlock)
473 {
474 struct regmap *map = __map;
475 unsigned long flags;
476
477 raw_spin_lock_irqsave(&map->raw_spinlock, flags);
478 map->raw_spinlock_flags = flags;
479 }
480
regmap_unlock_raw_spinlock(void * __map)481 static void regmap_unlock_raw_spinlock(void *__map)
482 __releases(&map->raw_spinlock)
483 {
484 struct regmap *map = __map;
485 raw_spin_unlock_irqrestore(&map->raw_spinlock, map->raw_spinlock_flags);
486 }
487
dev_get_regmap_release(struct device * dev,void * res)488 static void dev_get_regmap_release(struct device *dev, void *res)
489 {
490 /*
491 * We don't actually have anything to do here; the goal here
492 * is not to manage the regmap but to provide a simple way to
493 * get the regmap back given a struct device.
494 */
495 }
496
_regmap_range_add(struct regmap * map,struct regmap_range_node * data)497 static bool _regmap_range_add(struct regmap *map,
498 struct regmap_range_node *data)
499 {
500 struct rb_root *root = &map->range_tree;
501 struct rb_node **new = &(root->rb_node), *parent = NULL;
502
503 while (*new) {
504 struct regmap_range_node *this =
505 rb_entry(*new, struct regmap_range_node, node);
506
507 parent = *new;
508 if (data->range_max < this->range_min)
509 new = &((*new)->rb_left);
510 else if (data->range_min > this->range_max)
511 new = &((*new)->rb_right);
512 else
513 return false;
514 }
515
516 rb_link_node(&data->node, parent, new);
517 rb_insert_color(&data->node, root);
518
519 return true;
520 }
521
_regmap_range_lookup(struct regmap * map,unsigned int reg)522 static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
523 unsigned int reg)
524 {
525 struct rb_node *node = map->range_tree.rb_node;
526
527 while (node) {
528 struct regmap_range_node *this =
529 rb_entry(node, struct regmap_range_node, node);
530
531 if (reg < this->range_min)
532 node = node->rb_left;
533 else if (reg > this->range_max)
534 node = node->rb_right;
535 else
536 return this;
537 }
538
539 return NULL;
540 }
541
regmap_range_exit(struct regmap * map)542 static void regmap_range_exit(struct regmap *map)
543 {
544 struct rb_node *next;
545 struct regmap_range_node *range_node;
546
547 next = rb_first(&map->range_tree);
548 while (next) {
549 range_node = rb_entry(next, struct regmap_range_node, node);
550 next = rb_next(&range_node->node);
551 rb_erase(&range_node->node, &map->range_tree);
552 kfree(range_node);
553 }
554
555 kfree(map->selector_work_buf);
556 }
557
regmap_set_name(struct regmap * map,const struct regmap_config * config)558 static int regmap_set_name(struct regmap *map, const struct regmap_config *config)
559 {
560 if (config->name) {
561 const char *name = kstrdup_const(config->name, GFP_KERNEL);
562
563 if (!name)
564 return -ENOMEM;
565
566 kfree_const(map->name);
567 map->name = name;
568 }
569
570 return 0;
571 }
572
regmap_attach_dev(struct device * dev,struct regmap * map,const struct regmap_config * config)573 int regmap_attach_dev(struct device *dev, struct regmap *map,
574 const struct regmap_config *config)
575 {
576 struct regmap **m;
577 int ret;
578
579 map->dev = dev;
580
581 ret = regmap_set_name(map, config);
582 if (ret)
583 return ret;
584
585 regmap_debugfs_exit(map);
586 regmap_debugfs_init(map);
587
588 /* Add a devres resource for dev_get_regmap() */
589 m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
590 if (!m) {
591 regmap_debugfs_exit(map);
592 return -ENOMEM;
593 }
594 *m = map;
595 devres_add(dev, m);
596
597 return 0;
598 }
599 EXPORT_SYMBOL_GPL(regmap_attach_dev);
600
601 static int dev_get_regmap_match(struct device *dev, void *res, void *data);
602
regmap_detach_dev(struct device * dev,struct regmap * map)603 static int regmap_detach_dev(struct device *dev, struct regmap *map)
604 {
605 if (!dev)
606 return 0;
607
608 return devres_release(dev, dev_get_regmap_release,
609 dev_get_regmap_match, (void *)map->name);
610 }
611
regmap_get_reg_endian(const struct regmap_bus * bus,const struct regmap_config * config)612 static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
613 const struct regmap_config *config)
614 {
615 enum regmap_endian endian;
616
617 /* Retrieve the endianness specification from the regmap config */
618 endian = config->reg_format_endian;
619
620 /* If the regmap config specified a non-default value, use that */
621 if (endian != REGMAP_ENDIAN_DEFAULT)
622 return endian;
623
624 /* Retrieve the endianness specification from the bus config */
625 if (bus && bus->reg_format_endian_default)
626 endian = bus->reg_format_endian_default;
627
628 /* If the bus specified a non-default value, use that */
629 if (endian != REGMAP_ENDIAN_DEFAULT)
630 return endian;
631
632 /* Use this if no other value was found */
633 return REGMAP_ENDIAN_BIG;
634 }
635
regmap_get_val_endian(struct device * dev,const struct regmap_bus * bus,const struct regmap_config * config)636 enum regmap_endian regmap_get_val_endian(struct device *dev,
637 const struct regmap_bus *bus,
638 const struct regmap_config *config)
639 {
640 struct fwnode_handle *fwnode = dev ? dev_fwnode(dev) : NULL;
641 enum regmap_endian endian;
642
643 /* Retrieve the endianness specification from the regmap config */
644 endian = config->val_format_endian;
645
646 /* If the regmap config specified a non-default value, use that */
647 if (endian != REGMAP_ENDIAN_DEFAULT)
648 return endian;
649
650 /* If the firmware node exist try to get endianness from it */
651 if (fwnode_property_read_bool(fwnode, "big-endian"))
652 endian = REGMAP_ENDIAN_BIG;
653 else if (fwnode_property_read_bool(fwnode, "little-endian"))
654 endian = REGMAP_ENDIAN_LITTLE;
655 else if (fwnode_property_read_bool(fwnode, "native-endian"))
656 endian = REGMAP_ENDIAN_NATIVE;
657
658 /* If the endianness was specified in fwnode, use that */
659 if (endian != REGMAP_ENDIAN_DEFAULT)
660 return endian;
661
662 /* Retrieve the endianness specification from the bus config */
663 if (bus && bus->val_format_endian_default)
664 endian = bus->val_format_endian_default;
665
666 /* If the bus specified a non-default value, use that */
667 if (endian != REGMAP_ENDIAN_DEFAULT)
668 return endian;
669
670 /* Use this if no other value was found */
671 return REGMAP_ENDIAN_BIG;
672 }
673 EXPORT_SYMBOL_GPL(regmap_get_val_endian);
674
__regmap_init(struct device * dev,const struct regmap_bus * bus,void * bus_context,const struct regmap_config * config,struct lock_class_key * lock_key,const char * lock_name)675 struct regmap *__regmap_init(struct device *dev,
676 const struct regmap_bus *bus,
677 void *bus_context,
678 const struct regmap_config *config,
679 struct lock_class_key *lock_key,
680 const char *lock_name)
681 {
682 struct regmap *map;
683 int ret = -EINVAL;
684 enum regmap_endian reg_endian, val_endian;
685 int i, j;
686
687 if (!config)
688 goto err;
689
690 map = kzalloc(sizeof(*map), GFP_KERNEL);
691 if (map == NULL) {
692 ret = -ENOMEM;
693 goto err;
694 }
695
696 ret = regmap_set_name(map, config);
697 if (ret)
698 goto err_map;
699
700 ret = -EINVAL; /* Later error paths rely on this */
701
702 if (config->disable_locking) {
703 map->lock = map->unlock = regmap_lock_unlock_none;
704 map->can_sleep = config->can_sleep;
705 regmap_debugfs_disable(map);
706 } else if (config->lock && config->unlock) {
707 map->lock = config->lock;
708 map->unlock = config->unlock;
709 map->lock_arg = config->lock_arg;
710 map->can_sleep = config->can_sleep;
711 } else if (config->use_hwlock) {
712 map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
713 if (!map->hwlock) {
714 ret = -ENXIO;
715 goto err_name;
716 }
717
718 switch (config->hwlock_mode) {
719 case HWLOCK_IRQSTATE:
720 map->lock = regmap_lock_hwlock_irqsave;
721 map->unlock = regmap_unlock_hwlock_irqrestore;
722 break;
723 case HWLOCK_IRQ:
724 map->lock = regmap_lock_hwlock_irq;
725 map->unlock = regmap_unlock_hwlock_irq;
726 break;
727 default:
728 map->lock = regmap_lock_hwlock;
729 map->unlock = regmap_unlock_hwlock;
730 break;
731 }
732
733 map->lock_arg = map;
734 } else {
735 if ((bus && bus->fast_io) ||
736 config->fast_io) {
737 if (config->use_raw_spinlock) {
738 raw_spin_lock_init(&map->raw_spinlock);
739 map->lock = regmap_lock_raw_spinlock;
740 map->unlock = regmap_unlock_raw_spinlock;
741 lockdep_set_class_and_name(&map->raw_spinlock,
742 lock_key, lock_name);
743 } else {
744 spin_lock_init(&map->spinlock);
745 map->lock = regmap_lock_spinlock;
746 map->unlock = regmap_unlock_spinlock;
747 lockdep_set_class_and_name(&map->spinlock,
748 lock_key, lock_name);
749 }
750 } else {
751 mutex_init(&map->mutex);
752 map->lock = regmap_lock_mutex;
753 map->unlock = regmap_unlock_mutex;
754 map->can_sleep = true;
755 lockdep_set_class_and_name(&map->mutex,
756 lock_key, lock_name);
757 }
758 map->lock_arg = map;
759 map->lock_key = lock_key;
760 }
761
762 /*
763 * When we write in fast-paths with regmap_bulk_write() don't allocate
764 * scratch buffers with sleeping allocations.
765 */
766 if ((bus && bus->fast_io) || config->fast_io)
767 map->alloc_flags = GFP_ATOMIC;
768 else
769 map->alloc_flags = GFP_KERNEL;
770
771 map->reg_base = config->reg_base;
772
773 map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
774 map->format.pad_bytes = config->pad_bits / 8;
775 map->format.reg_shift = config->reg_shift;
776 map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
777 map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
778 config->val_bits + config->pad_bits, 8);
779 map->reg_shift = config->pad_bits % 8;
780 if (config->reg_stride)
781 map->reg_stride = config->reg_stride;
782 else
783 map->reg_stride = 1;
784 if (is_power_of_2(map->reg_stride))
785 map->reg_stride_order = ilog2(map->reg_stride);
786 else
787 map->reg_stride_order = -1;
788 map->use_single_read = config->use_single_read || !(config->read || (bus && bus->read));
789 map->use_single_write = config->use_single_write || !(config->write || (bus && bus->write));
790 map->can_multi_write = config->can_multi_write && (config->write || (bus && bus->write));
791 if (bus) {
792 map->max_raw_read = bus->max_raw_read;
793 map->max_raw_write = bus->max_raw_write;
794 } else if (config->max_raw_read && config->max_raw_write) {
795 map->max_raw_read = config->max_raw_read;
796 map->max_raw_write = config->max_raw_write;
797 }
798 map->dev = dev;
799 map->bus = bus;
800 map->bus_context = bus_context;
801 map->max_register = config->max_register;
802 map->max_register_is_set = map->max_register ?: config->max_register_is_0;
803 map->wr_table = config->wr_table;
804 map->rd_table = config->rd_table;
805 map->volatile_table = config->volatile_table;
806 map->precious_table = config->precious_table;
807 map->wr_noinc_table = config->wr_noinc_table;
808 map->rd_noinc_table = config->rd_noinc_table;
809 map->writeable_reg = config->writeable_reg;
810 map->readable_reg = config->readable_reg;
811 map->volatile_reg = config->volatile_reg;
812 map->precious_reg = config->precious_reg;
813 map->writeable_noinc_reg = config->writeable_noinc_reg;
814 map->readable_noinc_reg = config->readable_noinc_reg;
815 map->cache_type = config->cache_type;
816
817 spin_lock_init(&map->async_lock);
818 INIT_LIST_HEAD(&map->async_list);
819 INIT_LIST_HEAD(&map->async_free);
820 init_waitqueue_head(&map->async_waitq);
821
822 if (config->read_flag_mask ||
823 config->write_flag_mask ||
824 config->zero_flag_mask) {
825 map->read_flag_mask = config->read_flag_mask;
826 map->write_flag_mask = config->write_flag_mask;
827 } else if (bus) {
828 map->read_flag_mask = bus->read_flag_mask;
829 }
830
831 if (config && config->read && config->write) {
832 map->reg_read = _regmap_bus_read;
833 if (config->reg_update_bits)
834 map->reg_update_bits = config->reg_update_bits;
835
836 /* Bulk read/write */
837 map->read = config->read;
838 map->write = config->write;
839
840 reg_endian = REGMAP_ENDIAN_NATIVE;
841 val_endian = REGMAP_ENDIAN_NATIVE;
842 } else if (!bus) {
843 map->reg_read = config->reg_read;
844 map->reg_write = config->reg_write;
845 map->reg_update_bits = config->reg_update_bits;
846
847 map->defer_caching = false;
848 goto skip_format_initialization;
849 } else if (!bus->read || !bus->write) {
850 map->reg_read = _regmap_bus_reg_read;
851 map->reg_write = _regmap_bus_reg_write;
852 map->reg_update_bits = bus->reg_update_bits;
853
854 map->defer_caching = false;
855 goto skip_format_initialization;
856 } else {
857 map->reg_read = _regmap_bus_read;
858 map->reg_update_bits = bus->reg_update_bits;
859 /* Bulk read/write */
860 map->read = bus->read;
861 map->write = bus->write;
862
863 reg_endian = regmap_get_reg_endian(bus, config);
864 val_endian = regmap_get_val_endian(dev, bus, config);
865 }
866
867 switch (config->reg_bits + map->reg_shift) {
868 case 2:
869 switch (config->val_bits) {
870 case 6:
871 map->format.format_write = regmap_format_2_6_write;
872 break;
873 default:
874 goto err_hwlock;
875 }
876 break;
877
878 case 4:
879 switch (config->val_bits) {
880 case 12:
881 map->format.format_write = regmap_format_4_12_write;
882 break;
883 default:
884 goto err_hwlock;
885 }
886 break;
887
888 case 7:
889 switch (config->val_bits) {
890 case 9:
891 map->format.format_write = regmap_format_7_9_write;
892 break;
893 case 17:
894 map->format.format_write = regmap_format_7_17_write;
895 break;
896 default:
897 goto err_hwlock;
898 }
899 break;
900
901 case 10:
902 switch (config->val_bits) {
903 case 14:
904 map->format.format_write = regmap_format_10_14_write;
905 break;
906 default:
907 goto err_hwlock;
908 }
909 break;
910
911 case 12:
912 switch (config->val_bits) {
913 case 20:
914 map->format.format_write = regmap_format_12_20_write;
915 break;
916 default:
917 goto err_hwlock;
918 }
919 break;
920
921 case 8:
922 map->format.format_reg = regmap_format_8;
923 break;
924
925 case 16:
926 switch (reg_endian) {
927 case REGMAP_ENDIAN_BIG:
928 map->format.format_reg = regmap_format_16_be;
929 break;
930 case REGMAP_ENDIAN_LITTLE:
931 map->format.format_reg = regmap_format_16_le;
932 break;
933 case REGMAP_ENDIAN_NATIVE:
934 map->format.format_reg = regmap_format_16_native;
935 break;
936 default:
937 goto err_hwlock;
938 }
939 break;
940
941 case 24:
942 switch (reg_endian) {
943 case REGMAP_ENDIAN_BIG:
944 map->format.format_reg = regmap_format_24_be;
945 break;
946 default:
947 goto err_hwlock;
948 }
949 break;
950
951 case 32:
952 switch (reg_endian) {
953 case REGMAP_ENDIAN_BIG:
954 map->format.format_reg = regmap_format_32_be;
955 break;
956 case REGMAP_ENDIAN_LITTLE:
957 map->format.format_reg = regmap_format_32_le;
958 break;
959 case REGMAP_ENDIAN_NATIVE:
960 map->format.format_reg = regmap_format_32_native;
961 break;
962 default:
963 goto err_hwlock;
964 }
965 break;
966
967 default:
968 goto err_hwlock;
969 }
970
971 if (val_endian == REGMAP_ENDIAN_NATIVE)
972 map->format.parse_inplace = regmap_parse_inplace_noop;
973
974 switch (config->val_bits) {
975 case 8:
976 map->format.format_val = regmap_format_8;
977 map->format.parse_val = regmap_parse_8;
978 map->format.parse_inplace = regmap_parse_inplace_noop;
979 break;
980 case 16:
981 switch (val_endian) {
982 case REGMAP_ENDIAN_BIG:
983 map->format.format_val = regmap_format_16_be;
984 map->format.parse_val = regmap_parse_16_be;
985 map->format.parse_inplace = regmap_parse_16_be_inplace;
986 break;
987 case REGMAP_ENDIAN_LITTLE:
988 map->format.format_val = regmap_format_16_le;
989 map->format.parse_val = regmap_parse_16_le;
990 map->format.parse_inplace = regmap_parse_16_le_inplace;
991 break;
992 case REGMAP_ENDIAN_NATIVE:
993 map->format.format_val = regmap_format_16_native;
994 map->format.parse_val = regmap_parse_16_native;
995 break;
996 default:
997 goto err_hwlock;
998 }
999 break;
1000 case 24:
1001 switch (val_endian) {
1002 case REGMAP_ENDIAN_BIG:
1003 map->format.format_val = regmap_format_24_be;
1004 map->format.parse_val = regmap_parse_24_be;
1005 break;
1006 default:
1007 goto err_hwlock;
1008 }
1009 break;
1010 case 32:
1011 switch (val_endian) {
1012 case REGMAP_ENDIAN_BIG:
1013 map->format.format_val = regmap_format_32_be;
1014 map->format.parse_val = regmap_parse_32_be;
1015 map->format.parse_inplace = regmap_parse_32_be_inplace;
1016 break;
1017 case REGMAP_ENDIAN_LITTLE:
1018 map->format.format_val = regmap_format_32_le;
1019 map->format.parse_val = regmap_parse_32_le;
1020 map->format.parse_inplace = regmap_parse_32_le_inplace;
1021 break;
1022 case REGMAP_ENDIAN_NATIVE:
1023 map->format.format_val = regmap_format_32_native;
1024 map->format.parse_val = regmap_parse_32_native;
1025 break;
1026 default:
1027 goto err_hwlock;
1028 }
1029 break;
1030 }
1031
1032 if (map->format.format_write) {
1033 if ((reg_endian != REGMAP_ENDIAN_BIG) ||
1034 (val_endian != REGMAP_ENDIAN_BIG))
1035 goto err_hwlock;
1036 map->use_single_write = true;
1037 }
1038
1039 if (!map->format.format_write &&
1040 !(map->format.format_reg && map->format.format_val))
1041 goto err_hwlock;
1042
1043 map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
1044 if (map->work_buf == NULL) {
1045 ret = -ENOMEM;
1046 goto err_hwlock;
1047 }
1048
1049 if (map->format.format_write) {
1050 map->defer_caching = false;
1051 map->reg_write = _regmap_bus_formatted_write;
1052 } else if (map->format.format_val) {
1053 map->defer_caching = true;
1054 map->reg_write = _regmap_bus_raw_write;
1055 }
1056
1057 skip_format_initialization:
1058
1059 map->range_tree = RB_ROOT;
1060 for (i = 0; i < config->num_ranges; i++) {
1061 const struct regmap_range_cfg *range_cfg = &config->ranges[i];
1062 struct regmap_range_node *new;
1063
1064 /* Sanity check */
1065 if (range_cfg->range_max < range_cfg->range_min) {
1066 dev_err(map->dev, "Invalid range %d: %u < %u\n", i,
1067 range_cfg->range_max, range_cfg->range_min);
1068 goto err_range;
1069 }
1070
1071 if (range_cfg->range_max > map->max_register) {
1072 dev_err(map->dev, "Invalid range %d: %u > %u\n", i,
1073 range_cfg->range_max, map->max_register);
1074 goto err_range;
1075 }
1076
1077 if (range_cfg->selector_reg > map->max_register) {
1078 dev_err(map->dev,
1079 "Invalid range %d: selector out of map\n", i);
1080 goto err_range;
1081 }
1082
1083 if (range_cfg->window_len == 0) {
1084 dev_err(map->dev, "Invalid range %d: window_len 0\n",
1085 i);
1086 goto err_range;
1087 }
1088
1089 /* Make sure, that this register range has no selector
1090 or data window within its boundary */
1091 for (j = 0; j < config->num_ranges; j++) {
1092 unsigned int sel_reg = config->ranges[j].selector_reg;
1093 unsigned int win_min = config->ranges[j].window_start;
1094 unsigned int win_max = win_min +
1095 config->ranges[j].window_len - 1;
1096
1097 /* Allow data window inside its own virtual range */
1098 if (j == i)
1099 continue;
1100
1101 if (range_cfg->range_min <= sel_reg &&
1102 sel_reg <= range_cfg->range_max) {
1103 dev_err(map->dev,
1104 "Range %d: selector for %d in window\n",
1105 i, j);
1106 goto err_range;
1107 }
1108
1109 if (!(win_max < range_cfg->range_min ||
1110 win_min > range_cfg->range_max)) {
1111 dev_err(map->dev,
1112 "Range %d: window for %d in window\n",
1113 i, j);
1114 goto err_range;
1115 }
1116 }
1117
1118 new = kzalloc(sizeof(*new), GFP_KERNEL);
1119 if (new == NULL) {
1120 ret = -ENOMEM;
1121 goto err_range;
1122 }
1123
1124 new->map = map;
1125 new->name = range_cfg->name;
1126 new->range_min = range_cfg->range_min;
1127 new->range_max = range_cfg->range_max;
1128 new->selector_reg = range_cfg->selector_reg;
1129 new->selector_mask = range_cfg->selector_mask;
1130 new->selector_shift = range_cfg->selector_shift;
1131 new->window_start = range_cfg->window_start;
1132 new->window_len = range_cfg->window_len;
1133
1134 if (!_regmap_range_add(map, new)) {
1135 dev_err(map->dev, "Failed to add range %d\n", i);
1136 kfree(new);
1137 goto err_range;
1138 }
1139
1140 if (map->selector_work_buf == NULL) {
1141 map->selector_work_buf =
1142 kzalloc(map->format.buf_size, GFP_KERNEL);
1143 if (map->selector_work_buf == NULL) {
1144 ret = -ENOMEM;
1145 goto err_range;
1146 }
1147 }
1148 }
1149
1150 ret = regcache_init(map, config);
1151 if (ret != 0)
1152 goto err_range;
1153
1154 if (dev) {
1155 ret = regmap_attach_dev(dev, map, config);
1156 if (ret != 0)
1157 goto err_regcache;
1158 } else {
1159 regmap_debugfs_init(map);
1160 }
1161
1162 return map;
1163
1164 err_regcache:
1165 regcache_exit(map);
1166 err_range:
1167 regmap_range_exit(map);
1168 kfree(map->work_buf);
1169 err_hwlock:
1170 if (map->hwlock)
1171 hwspin_lock_free(map->hwlock);
1172 err_name:
1173 kfree_const(map->name);
1174 err_map:
1175 kfree(map);
1176 err:
1177 if (bus && bus->free_on_exit)
1178 kfree(bus);
1179 return ERR_PTR(ret);
1180 }
1181 EXPORT_SYMBOL_GPL(__regmap_init);
1182
devm_regmap_release(struct device * dev,void * res)1183 static void devm_regmap_release(struct device *dev, void *res)
1184 {
1185 regmap_exit(*(struct regmap **)res);
1186 }
1187
__devm_regmap_init(struct device * dev,const struct regmap_bus * bus,void * bus_context,const struct regmap_config * config,struct lock_class_key * lock_key,const char * lock_name)1188 struct regmap *__devm_regmap_init(struct device *dev,
1189 const struct regmap_bus *bus,
1190 void *bus_context,
1191 const struct regmap_config *config,
1192 struct lock_class_key *lock_key,
1193 const char *lock_name)
1194 {
1195 struct regmap **ptr, *regmap;
1196
1197 ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
1198 if (!ptr)
1199 return ERR_PTR(-ENOMEM);
1200
1201 regmap = __regmap_init(dev, bus, bus_context, config,
1202 lock_key, lock_name);
1203 if (!IS_ERR(regmap)) {
1204 *ptr = regmap;
1205 devres_add(dev, ptr);
1206 } else {
1207 devres_free(ptr);
1208 }
1209
1210 return regmap;
1211 }
1212 EXPORT_SYMBOL_GPL(__devm_regmap_init);
1213
regmap_field_init(struct regmap_field * rm_field,struct regmap * regmap,struct reg_field reg_field)1214 static void regmap_field_init(struct regmap_field *rm_field,
1215 struct regmap *regmap, struct reg_field reg_field)
1216 {
1217 rm_field->regmap = regmap;
1218 rm_field->reg = reg_field.reg;
1219 rm_field->shift = reg_field.lsb;
1220 rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
1221
1222 WARN_ONCE(rm_field->mask == 0, "invalid empty mask defined\n");
1223
1224 rm_field->id_size = reg_field.id_size;
1225 rm_field->id_offset = reg_field.id_offset;
1226 }
1227
1228 /**
1229 * devm_regmap_field_alloc() - Allocate and initialise a register field.
1230 *
1231 * @dev: Device that will be interacted with
1232 * @regmap: regmap bank in which this register field is located.
1233 * @reg_field: Register field with in the bank.
1234 *
1235 * The return value will be an ERR_PTR() on error or a valid pointer
1236 * to a struct regmap_field. The regmap_field will be automatically freed
1237 * by the device management code.
1238 */
devm_regmap_field_alloc(struct device * dev,struct regmap * regmap,struct reg_field reg_field)1239 struct regmap_field *devm_regmap_field_alloc(struct device *dev,
1240 struct regmap *regmap, struct reg_field reg_field)
1241 {
1242 struct regmap_field *rm_field = devm_kzalloc(dev,
1243 sizeof(*rm_field), GFP_KERNEL);
1244 if (!rm_field)
1245 return ERR_PTR(-ENOMEM);
1246
1247 regmap_field_init(rm_field, regmap, reg_field);
1248
1249 return rm_field;
1250
1251 }
1252 EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
1253
1254
1255 /**
1256 * regmap_field_bulk_alloc() - Allocate and initialise a bulk register field.
1257 *
1258 * @regmap: regmap bank in which this register field is located.
1259 * @rm_field: regmap register fields within the bank.
1260 * @reg_field: Register fields within the bank.
1261 * @num_fields: Number of register fields.
1262 *
1263 * The return value will be an -ENOMEM on error or zero for success.
1264 * Newly allocated regmap_fields should be freed by calling
1265 * regmap_field_bulk_free()
1266 */
regmap_field_bulk_alloc(struct regmap * regmap,struct regmap_field ** rm_field,const struct reg_field * reg_field,int num_fields)1267 int regmap_field_bulk_alloc(struct regmap *regmap,
1268 struct regmap_field **rm_field,
1269 const struct reg_field *reg_field,
1270 int num_fields)
1271 {
1272 struct regmap_field *rf;
1273 int i;
1274
1275 rf = kcalloc(num_fields, sizeof(*rf), GFP_KERNEL);
1276 if (!rf)
1277 return -ENOMEM;
1278
1279 for (i = 0; i < num_fields; i++) {
1280 regmap_field_init(&rf[i], regmap, reg_field[i]);
1281 rm_field[i] = &rf[i];
1282 }
1283
1284 return 0;
1285 }
1286 EXPORT_SYMBOL_GPL(regmap_field_bulk_alloc);
1287
1288 /**
1289 * devm_regmap_field_bulk_alloc() - Allocate and initialise a bulk register
1290 * fields.
1291 *
1292 * @dev: Device that will be interacted with
1293 * @regmap: regmap bank in which this register field is located.
1294 * @rm_field: regmap register fields within the bank.
1295 * @reg_field: Register fields within the bank.
1296 * @num_fields: Number of register fields.
1297 *
1298 * The return value will be an -ENOMEM on error or zero for success.
1299 * Newly allocated regmap_fields will be automatically freed by the
1300 * device management code.
1301 */
devm_regmap_field_bulk_alloc(struct device * dev,struct regmap * regmap,struct regmap_field ** rm_field,const struct reg_field * reg_field,int num_fields)1302 int devm_regmap_field_bulk_alloc(struct device *dev,
1303 struct regmap *regmap,
1304 struct regmap_field **rm_field,
1305 const struct reg_field *reg_field,
1306 int num_fields)
1307 {
1308 struct regmap_field *rf;
1309 int i;
1310
1311 rf = devm_kcalloc(dev, num_fields, sizeof(*rf), GFP_KERNEL);
1312 if (!rf)
1313 return -ENOMEM;
1314
1315 for (i = 0; i < num_fields; i++) {
1316 regmap_field_init(&rf[i], regmap, reg_field[i]);
1317 rm_field[i] = &rf[i];
1318 }
1319
1320 return 0;
1321 }
1322 EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_alloc);
1323
1324 /**
1325 * regmap_field_bulk_free() - Free register field allocated using
1326 * regmap_field_bulk_alloc.
1327 *
1328 * @field: regmap fields which should be freed.
1329 */
regmap_field_bulk_free(struct regmap_field * field)1330 void regmap_field_bulk_free(struct regmap_field *field)
1331 {
1332 kfree(field);
1333 }
1334 EXPORT_SYMBOL_GPL(regmap_field_bulk_free);
1335
1336 /**
1337 * devm_regmap_field_bulk_free() - Free a bulk register field allocated using
1338 * devm_regmap_field_bulk_alloc.
1339 *
1340 * @dev: Device that will be interacted with
1341 * @field: regmap field which should be freed.
1342 *
1343 * Free register field allocated using devm_regmap_field_bulk_alloc(). Usually
1344 * drivers need not call this function, as the memory allocated via devm
1345 * will be freed as per device-driver life-cycle.
1346 */
devm_regmap_field_bulk_free(struct device * dev,struct regmap_field * field)1347 void devm_regmap_field_bulk_free(struct device *dev,
1348 struct regmap_field *field)
1349 {
1350 devm_kfree(dev, field);
1351 }
1352 EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_free);
1353
1354 /**
1355 * devm_regmap_field_free() - Free a register field allocated using
1356 * devm_regmap_field_alloc.
1357 *
1358 * @dev: Device that will be interacted with
1359 * @field: regmap field which should be freed.
1360 *
1361 * Free register field allocated using devm_regmap_field_alloc(). Usually
1362 * drivers need not call this function, as the memory allocated via devm
1363 * will be freed as per device-driver life-cyle.
1364 */
devm_regmap_field_free(struct device * dev,struct regmap_field * field)1365 void devm_regmap_field_free(struct device *dev,
1366 struct regmap_field *field)
1367 {
1368 devm_kfree(dev, field);
1369 }
1370 EXPORT_SYMBOL_GPL(devm_regmap_field_free);
1371
1372 /**
1373 * regmap_field_alloc() - Allocate and initialise a register field.
1374 *
1375 * @regmap: regmap bank in which this register field is located.
1376 * @reg_field: Register field with in the bank.
1377 *
1378 * The return value will be an ERR_PTR() on error or a valid pointer
1379 * to a struct regmap_field. The regmap_field should be freed by the
1380 * user once its finished working with it using regmap_field_free().
1381 */
regmap_field_alloc(struct regmap * regmap,struct reg_field reg_field)1382 struct regmap_field *regmap_field_alloc(struct regmap *regmap,
1383 struct reg_field reg_field)
1384 {
1385 struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);
1386
1387 if (!rm_field)
1388 return ERR_PTR(-ENOMEM);
1389
1390 regmap_field_init(rm_field, regmap, reg_field);
1391
1392 return rm_field;
1393 }
1394 EXPORT_SYMBOL_GPL(regmap_field_alloc);
1395
1396 /**
1397 * regmap_field_free() - Free register field allocated using
1398 * regmap_field_alloc.
1399 *
1400 * @field: regmap field which should be freed.
1401 */
regmap_field_free(struct regmap_field * field)1402 void regmap_field_free(struct regmap_field *field)
1403 {
1404 kfree(field);
1405 }
1406 EXPORT_SYMBOL_GPL(regmap_field_free);
1407
1408 /**
1409 * regmap_reinit_cache() - Reinitialise the current register cache
1410 *
1411 * @map: Register map to operate on.
1412 * @config: New configuration. Only the cache data will be used.
1413 *
1414 * Discard any existing register cache for the map and initialize a
1415 * new cache. This can be used to restore the cache to defaults or to
1416 * update the cache configuration to reflect runtime discovery of the
1417 * hardware.
1418 *
1419 * No explicit locking is done here, the user needs to ensure that
1420 * this function will not race with other calls to regmap.
1421 */
regmap_reinit_cache(struct regmap * map,const struct regmap_config * config)1422 int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
1423 {
1424 int ret;
1425
1426 regcache_exit(map);
1427 regmap_debugfs_exit(map);
1428
1429 map->max_register = config->max_register;
1430 map->max_register_is_set = map->max_register ?: config->max_register_is_0;
1431 map->writeable_reg = config->writeable_reg;
1432 map->readable_reg = config->readable_reg;
1433 map->volatile_reg = config->volatile_reg;
1434 map->precious_reg = config->precious_reg;
1435 map->writeable_noinc_reg = config->writeable_noinc_reg;
1436 map->readable_noinc_reg = config->readable_noinc_reg;
1437 map->cache_type = config->cache_type;
1438
1439 ret = regmap_set_name(map, config);
1440 if (ret)
1441 return ret;
1442
1443 regmap_debugfs_init(map);
1444
1445 map->cache_bypass = false;
1446 map->cache_only = false;
1447
1448 return regcache_init(map, config);
1449 }
1450 EXPORT_SYMBOL_GPL(regmap_reinit_cache);
1451
1452 /**
1453 * regmap_exit() - Free a previously allocated register map
1454 *
1455 * @map: Register map to operate on.
1456 */
regmap_exit(struct regmap * map)1457 void regmap_exit(struct regmap *map)
1458 {
1459 struct regmap_async *async;
1460
1461 regmap_detach_dev(map->dev, map);
1462 regcache_exit(map);
1463
1464 regmap_debugfs_exit(map);
1465 regmap_range_exit(map);
1466 if (map->bus && map->bus->free_context)
1467 map->bus->free_context(map->bus_context);
1468 kfree(map->work_buf);
1469 while (!list_empty(&map->async_free)) {
1470 async = list_first_entry_or_null(&map->async_free,
1471 struct regmap_async,
1472 list);
1473 list_del(&async->list);
1474 kfree(async->work_buf);
1475 kfree(async);
1476 }
1477 if (map->hwlock)
1478 hwspin_lock_free(map->hwlock);
1479 if (map->lock == regmap_lock_mutex)
1480 mutex_destroy(&map->mutex);
1481 kfree_const(map->name);
1482 kfree(map->patch);
1483 if (map->bus && map->bus->free_on_exit)
1484 kfree(map->bus);
1485 kfree(map);
1486 }
1487 EXPORT_SYMBOL_GPL(regmap_exit);
1488
dev_get_regmap_match(struct device * dev,void * res,void * data)1489 static int dev_get_regmap_match(struct device *dev, void *res, void *data)
1490 {
1491 struct regmap **r = res;
1492 if (!r || !*r) {
1493 WARN_ON(!r || !*r);
1494 return 0;
1495 }
1496
1497 /* If the user didn't specify a name match any */
1498 if (data)
1499 return (*r)->name && !strcmp((*r)->name, data);
1500 else
1501 return 1;
1502 }
1503
1504 /**
1505 * dev_get_regmap() - Obtain the regmap (if any) for a device
1506 *
1507 * @dev: Device to retrieve the map for
1508 * @name: Optional name for the register map, usually NULL.
1509 *
1510 * Returns the regmap for the device if one is present, or NULL. If
1511 * name is specified then it must match the name specified when
1512 * registering the device, if it is NULL then the first regmap found
1513 * will be used. Devices with multiple register maps are very rare,
1514 * generic code should normally not need to specify a name.
1515 */
dev_get_regmap(struct device * dev,const char * name)1516 struct regmap *dev_get_regmap(struct device *dev, const char *name)
1517 {
1518 struct regmap **r = devres_find(dev, dev_get_regmap_release,
1519 dev_get_regmap_match, (void *)name);
1520
1521 if (!r)
1522 return NULL;
1523 return *r;
1524 }
1525 EXPORT_SYMBOL_GPL(dev_get_regmap);
1526
1527 /**
1528 * regmap_get_device() - Obtain the device from a regmap
1529 *
1530 * @map: Register map to operate on.
1531 *
1532 * Returns the underlying device that the regmap has been created for.
1533 */
regmap_get_device(struct regmap * map)1534 struct device *regmap_get_device(struct regmap *map)
1535 {
1536 return map->dev;
1537 }
1538 EXPORT_SYMBOL_GPL(regmap_get_device);
1539
_regmap_select_page(struct regmap * map,unsigned int * reg,struct regmap_range_node * range,unsigned int val_num)1540 static int _regmap_select_page(struct regmap *map, unsigned int *reg,
1541 struct regmap_range_node *range,
1542 unsigned int val_num)
1543 {
1544 void *orig_work_buf;
1545 unsigned int win_offset;
1546 unsigned int win_page;
1547 bool page_chg;
1548 int ret;
1549
1550 win_offset = (*reg - range->range_min) % range->window_len;
1551 win_page = (*reg - range->range_min) / range->window_len;
1552
1553 if (val_num > 1) {
1554 /* Bulk write shouldn't cross range boundary */
1555 if (*reg + val_num - 1 > range->range_max)
1556 return -EINVAL;
1557
1558 /* ... or single page boundary */
1559 if (val_num > range->window_len - win_offset)
1560 return -EINVAL;
1561 }
1562
1563 /* It is possible to have selector register inside data window.
1564 In that case, selector register is located on every page and
1565 it needs no page switching, when accessed alone. */
1566 if (val_num > 1 ||
1567 range->window_start + win_offset != range->selector_reg) {
1568 /* Use separate work_buf during page switching */
1569 orig_work_buf = map->work_buf;
1570 map->work_buf = map->selector_work_buf;
1571
1572 ret = _regmap_update_bits(map, range->selector_reg,
1573 range->selector_mask,
1574 win_page << range->selector_shift,
1575 &page_chg, false);
1576
1577 map->work_buf = orig_work_buf;
1578
1579 if (ret != 0)
1580 return ret;
1581 }
1582
1583 *reg = range->window_start + win_offset;
1584
1585 return 0;
1586 }
1587
regmap_set_work_buf_flag_mask(struct regmap * map,int max_bytes,unsigned long mask)1588 static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
1589 unsigned long mask)
1590 {
1591 u8 *buf;
1592 int i;
1593
1594 if (!mask || !map->work_buf)
1595 return;
1596
1597 buf = map->work_buf;
1598
1599 for (i = 0; i < max_bytes; i++)
1600 buf[i] |= (mask >> (8 * i)) & 0xff;
1601 }
1602
regmap_reg_addr(struct regmap * map,unsigned int reg)1603 static unsigned int regmap_reg_addr(struct regmap *map, unsigned int reg)
1604 {
1605 reg += map->reg_base;
1606
1607 if (map->format.reg_shift > 0)
1608 reg >>= map->format.reg_shift;
1609 else if (map->format.reg_shift < 0)
1610 reg <<= -(map->format.reg_shift);
1611
1612 return reg;
1613 }
1614
_regmap_raw_write_impl(struct regmap * map,unsigned int reg,const void * val,size_t val_len,bool noinc)1615 static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
1616 const void *val, size_t val_len, bool noinc)
1617 {
1618 struct regmap_range_node *range;
1619 unsigned long flags;
1620 void *work_val = map->work_buf + map->format.reg_bytes +
1621 map->format.pad_bytes;
1622 void *buf;
1623 int ret = -ENOTSUPP;
1624 size_t len;
1625 int i;
1626
1627 /* Check for unwritable or noinc registers in range
1628 * before we start
1629 */
1630 if (!regmap_writeable_noinc(map, reg)) {
1631 for (i = 0; i < val_len / map->format.val_bytes; i++) {
1632 unsigned int element =
1633 reg + regmap_get_offset(map, i);
1634 if (!regmap_writeable(map, element) ||
1635 regmap_writeable_noinc(map, element))
1636 return -EINVAL;
1637 }
1638 }
1639
1640 if (!map->cache_bypass && map->format.parse_val) {
1641 unsigned int ival, offset;
1642 int val_bytes = map->format.val_bytes;
1643
1644 /* Cache the last written value for noinc writes */
1645 i = noinc ? val_len - val_bytes : 0;
1646 for (; i < val_len; i += val_bytes) {
1647 ival = map->format.parse_val(val + i);
1648 offset = noinc ? 0 : regmap_get_offset(map, i / val_bytes);
1649 ret = regcache_write(map, reg + offset, ival);
1650 if (ret) {
1651 dev_err(map->dev,
1652 "Error in caching of register: %x ret: %d\n",
1653 reg + offset, ret);
1654 return ret;
1655 }
1656 }
1657 if (map->cache_only) {
1658 map->cache_dirty = true;
1659 return 0;
1660 }
1661 }
1662
1663 range = _regmap_range_lookup(map, reg);
1664 if (range) {
1665 int val_num = val_len / map->format.val_bytes;
1666 int win_offset = (reg - range->range_min) % range->window_len;
1667 int win_residue = range->window_len - win_offset;
1668
1669 /* If the write goes beyond the end of the window split it */
1670 while (val_num > win_residue) {
1671 dev_dbg(map->dev, "Writing window %d/%zu\n",
1672 win_residue, val_len / map->format.val_bytes);
1673 ret = _regmap_raw_write_impl(map, reg, val,
1674 win_residue *
1675 map->format.val_bytes, noinc);
1676 if (ret != 0)
1677 return ret;
1678
1679 reg += win_residue;
1680 val_num -= win_residue;
1681 val += win_residue * map->format.val_bytes;
1682 val_len -= win_residue * map->format.val_bytes;
1683
1684 win_offset = (reg - range->range_min) %
1685 range->window_len;
1686 win_residue = range->window_len - win_offset;
1687 }
1688
1689 ret = _regmap_select_page(map, ®, range, noinc ? 1 : val_num);
1690 if (ret != 0)
1691 return ret;
1692 }
1693
1694 reg = regmap_reg_addr(map, reg);
1695 map->format.format_reg(map->work_buf, reg, map->reg_shift);
1696 regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
1697 map->write_flag_mask);
1698
1699 /*
1700 * Essentially all I/O mechanisms will be faster with a single
1701 * buffer to write. Since register syncs often generate raw
1702 * writes of single registers optimise that case.
1703 */
1704 if (val != work_val && val_len == map->format.val_bytes) {
1705 memcpy(work_val, val, map->format.val_bytes);
1706 val = work_val;
1707 }
1708
1709 if (map->async && map->bus && map->bus->async_write) {
1710 struct regmap_async *async;
1711
1712 trace_regmap_async_write_start(map, reg, val_len);
1713
1714 spin_lock_irqsave(&map->async_lock, flags);
1715 async = list_first_entry_or_null(&map->async_free,
1716 struct regmap_async,
1717 list);
1718 if (async)
1719 list_del(&async->list);
1720 spin_unlock_irqrestore(&map->async_lock, flags);
1721
1722 if (!async) {
1723 async = map->bus->async_alloc();
1724 if (!async)
1725 return -ENOMEM;
1726
1727 async->work_buf = kzalloc(map->format.buf_size,
1728 GFP_KERNEL | GFP_DMA);
1729 if (!async->work_buf) {
1730 kfree(async);
1731 return -ENOMEM;
1732 }
1733 }
1734
1735 async->map = map;
1736
1737 /* If the caller supplied the value we can use it safely. */
1738 memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
1739 map->format.reg_bytes + map->format.val_bytes);
1740
1741 spin_lock_irqsave(&map->async_lock, flags);
1742 list_add_tail(&async->list, &map->async_list);
1743 spin_unlock_irqrestore(&map->async_lock, flags);
1744
1745 if (val != work_val)
1746 ret = map->bus->async_write(map->bus_context,
1747 async->work_buf,
1748 map->format.reg_bytes +
1749 map->format.pad_bytes,
1750 val, val_len, async);
1751 else
1752 ret = map->bus->async_write(map->bus_context,
1753 async->work_buf,
1754 map->format.reg_bytes +
1755 map->format.pad_bytes +
1756 val_len, NULL, 0, async);
1757
1758 if (ret != 0) {
1759 dev_err(map->dev, "Failed to schedule write: %d\n",
1760 ret);
1761
1762 spin_lock_irqsave(&map->async_lock, flags);
1763 list_move(&async->list, &map->async_free);
1764 spin_unlock_irqrestore(&map->async_lock, flags);
1765 }
1766
1767 return ret;
1768 }
1769
1770 trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);
1771
1772 /* If we're doing a single register write we can probably just
1773 * send the work_buf directly, otherwise try to do a gather
1774 * write.
1775 */
1776 if (val == work_val)
1777 ret = map->write(map->bus_context, map->work_buf,
1778 map->format.reg_bytes +
1779 map->format.pad_bytes +
1780 val_len);
1781 else if (map->bus && map->bus->gather_write)
1782 ret = map->bus->gather_write(map->bus_context, map->work_buf,
1783 map->format.reg_bytes +
1784 map->format.pad_bytes,
1785 val, val_len);
1786 else
1787 ret = -ENOTSUPP;
1788
1789 /* If that didn't work fall back on linearising by hand. */
1790 if (ret == -ENOTSUPP) {
1791 len = map->format.reg_bytes + map->format.pad_bytes + val_len;
1792 buf = kzalloc(len, GFP_KERNEL);
1793 if (!buf)
1794 return -ENOMEM;
1795
1796 memcpy(buf, map->work_buf, map->format.reg_bytes);
1797 memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
1798 val, val_len);
1799 ret = map->write(map->bus_context, buf, len);
1800
1801 kfree(buf);
1802 } else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
1803 /* regcache_drop_region() takes lock that we already have,
1804 * thus call map->cache_ops->drop() directly
1805 */
1806 if (map->cache_ops && map->cache_ops->drop)
1807 map->cache_ops->drop(map, reg, reg + 1);
1808 }
1809
1810 trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
1811
1812 return ret;
1813 }
1814
1815 /**
1816 * regmap_can_raw_write - Test if regmap_raw_write() is supported
1817 *
1818 * @map: Map to check.
1819 */
regmap_can_raw_write(struct regmap * map)1820 bool regmap_can_raw_write(struct regmap *map)
1821 {
1822 return map->write && map->format.format_val && map->format.format_reg;
1823 }
1824 EXPORT_SYMBOL_GPL(regmap_can_raw_write);
1825
1826 /**
1827 * regmap_get_raw_read_max - Get the maximum size we can read
1828 *
1829 * @map: Map to check.
1830 */
regmap_get_raw_read_max(struct regmap * map)1831 size_t regmap_get_raw_read_max(struct regmap *map)
1832 {
1833 return map->max_raw_read;
1834 }
1835 EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);
1836
1837 /**
1838 * regmap_get_raw_write_max - Get the maximum size we can read
1839 *
1840 * @map: Map to check.
1841 */
regmap_get_raw_write_max(struct regmap * map)1842 size_t regmap_get_raw_write_max(struct regmap *map)
1843 {
1844 return map->max_raw_write;
1845 }
1846 EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);
1847
_regmap_bus_formatted_write(void * context,unsigned int reg,unsigned int val)1848 static int _regmap_bus_formatted_write(void *context, unsigned int reg,
1849 unsigned int val)
1850 {
1851 int ret;
1852 struct regmap_range_node *range;
1853 struct regmap *map = context;
1854
1855 WARN_ON(!map->format.format_write);
1856
1857 range = _regmap_range_lookup(map, reg);
1858 if (range) {
1859 ret = _regmap_select_page(map, ®, range, 1);
1860 if (ret != 0)
1861 return ret;
1862 }
1863
1864 reg = regmap_reg_addr(map, reg);
1865 map->format.format_write(map, reg, val);
1866
1867 trace_regmap_hw_write_start(map, reg, 1);
1868
1869 ret = map->write(map->bus_context, map->work_buf, map->format.buf_size);
1870
1871 trace_regmap_hw_write_done(map, reg, 1);
1872
1873 return ret;
1874 }
1875
_regmap_bus_reg_write(void * context,unsigned int reg,unsigned int val)1876 static int _regmap_bus_reg_write(void *context, unsigned int reg,
1877 unsigned int val)
1878 {
1879 struct regmap *map = context;
1880 struct regmap_range_node *range;
1881 int ret;
1882
1883 range = _regmap_range_lookup(map, reg);
1884 if (range) {
1885 ret = _regmap_select_page(map, ®, range, 1);
1886 if (ret != 0)
1887 return ret;
1888 }
1889
1890 reg = regmap_reg_addr(map, reg);
1891 return map->bus->reg_write(map->bus_context, reg, val);
1892 }
1893
_regmap_bus_raw_write(void * context,unsigned int reg,unsigned int val)1894 static int _regmap_bus_raw_write(void *context, unsigned int reg,
1895 unsigned int val)
1896 {
1897 struct regmap *map = context;
1898
1899 WARN_ON(!map->format.format_val);
1900
1901 map->format.format_val(map->work_buf + map->format.reg_bytes
1902 + map->format.pad_bytes, val, 0);
1903 return _regmap_raw_write_impl(map, reg,
1904 map->work_buf +
1905 map->format.reg_bytes +
1906 map->format.pad_bytes,
1907 map->format.val_bytes,
1908 false);
1909 }
1910
_regmap_map_get_context(struct regmap * map)1911 static inline void *_regmap_map_get_context(struct regmap *map)
1912 {
1913 return (map->bus || (!map->bus && map->read)) ? map : map->bus_context;
1914 }
1915
_regmap_write(struct regmap * map,unsigned int reg,unsigned int val)1916 int _regmap_write(struct regmap *map, unsigned int reg,
1917 unsigned int val)
1918 {
1919 int ret;
1920 void *context = _regmap_map_get_context(map);
1921
1922 if (!regmap_writeable(map, reg))
1923 return -EIO;
1924
1925 if (!map->cache_bypass && !map->defer_caching) {
1926 ret = regcache_write(map, reg, val);
1927 if (ret != 0)
1928 return ret;
1929 if (map->cache_only) {
1930 map->cache_dirty = true;
1931 return 0;
1932 }
1933 }
1934
1935 ret = map->reg_write(context, reg, val);
1936 if (ret == 0) {
1937 if (regmap_should_log(map))
1938 dev_info(map->dev, "%x <= %x\n", reg, val);
1939
1940 trace_regmap_reg_write(map, reg, val);
1941 }
1942
1943 return ret;
1944 }
1945
1946 /**
1947 * regmap_write() - Write a value to a single register
1948 *
1949 * @map: Register map to write to
1950 * @reg: Register to write to
1951 * @val: Value to be written
1952 *
1953 * A value of zero will be returned on success, a negative errno will
1954 * be returned in error cases.
1955 */
regmap_write(struct regmap * map,unsigned int reg,unsigned int val)1956 int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
1957 {
1958 int ret;
1959
1960 if (!IS_ALIGNED(reg, map->reg_stride))
1961 return -EINVAL;
1962
1963 map->lock(map->lock_arg);
1964
1965 ret = _regmap_write(map, reg, val);
1966
1967 map->unlock(map->lock_arg);
1968
1969 return ret;
1970 }
1971 EXPORT_SYMBOL_GPL(regmap_write);
1972
1973 /**
1974 * regmap_write_async() - Write a value to a single register asynchronously
1975 *
1976 * @map: Register map to write to
1977 * @reg: Register to write to
1978 * @val: Value to be written
1979 *
1980 * A value of zero will be returned on success, a negative errno will
1981 * be returned in error cases.
1982 */
regmap_write_async(struct regmap * map,unsigned int reg,unsigned int val)1983 int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
1984 {
1985 int ret;
1986
1987 if (!IS_ALIGNED(reg, map->reg_stride))
1988 return -EINVAL;
1989
1990 map->lock(map->lock_arg);
1991
1992 map->async = true;
1993
1994 ret = _regmap_write(map, reg, val);
1995
1996 map->async = false;
1997
1998 map->unlock(map->lock_arg);
1999
2000 return ret;
2001 }
2002 EXPORT_SYMBOL_GPL(regmap_write_async);
2003
_regmap_raw_write(struct regmap * map,unsigned int reg,const void * val,size_t val_len,bool noinc)2004 int _regmap_raw_write(struct regmap *map, unsigned int reg,
2005 const void *val, size_t val_len, bool noinc)
2006 {
2007 size_t val_bytes = map->format.val_bytes;
2008 size_t val_count = val_len / val_bytes;
2009 size_t chunk_count, chunk_bytes;
2010 size_t chunk_regs = val_count;
2011 int ret, i;
2012
2013 if (!val_count)
2014 return -EINVAL;
2015
2016 if (map->use_single_write)
2017 chunk_regs = 1;
2018 else if (map->max_raw_write && val_len > map->max_raw_write)
2019 chunk_regs = map->max_raw_write / val_bytes;
2020
2021 chunk_count = val_count / chunk_regs;
2022 chunk_bytes = chunk_regs * val_bytes;
2023
2024 /* Write as many bytes as possible with chunk_size */
2025 for (i = 0; i < chunk_count; i++) {
2026 ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes, noinc);
2027 if (ret)
2028 return ret;
2029
2030 reg += regmap_get_offset(map, chunk_regs);
2031 val += chunk_bytes;
2032 val_len -= chunk_bytes;
2033 }
2034
2035 /* Write remaining bytes */
2036 if (val_len)
2037 ret = _regmap_raw_write_impl(map, reg, val, val_len, noinc);
2038
2039 return ret;
2040 }
2041
2042 /**
2043 * regmap_raw_write() - Write raw values to one or more registers
2044 *
2045 * @map: Register map to write to
2046 * @reg: Initial register to write to
2047 * @val: Block of data to be written, laid out for direct transmission to the
2048 * device
2049 * @val_len: Length of data pointed to by val.
2050 *
2051 * This function is intended to be used for things like firmware
2052 * download where a large block of data needs to be transferred to the
2053 * device. No formatting will be done on the data provided.
2054 *
2055 * A value of zero will be returned on success, a negative errno will
2056 * be returned in error cases.
2057 */
regmap_raw_write(struct regmap * map,unsigned int reg,const void * val,size_t val_len)2058 int regmap_raw_write(struct regmap *map, unsigned int reg,
2059 const void *val, size_t val_len)
2060 {
2061 int ret;
2062
2063 if (!regmap_can_raw_write(map))
2064 return -EINVAL;
2065 if (val_len % map->format.val_bytes)
2066 return -EINVAL;
2067
2068 map->lock(map->lock_arg);
2069
2070 ret = _regmap_raw_write(map, reg, val, val_len, false);
2071
2072 map->unlock(map->lock_arg);
2073
2074 return ret;
2075 }
2076 EXPORT_SYMBOL_GPL(regmap_raw_write);
2077
regmap_noinc_readwrite(struct regmap * map,unsigned int reg,void * val,unsigned int val_len,bool write)2078 static int regmap_noinc_readwrite(struct regmap *map, unsigned int reg,
2079 void *val, unsigned int val_len, bool write)
2080 {
2081 size_t val_bytes = map->format.val_bytes;
2082 size_t val_count = val_len / val_bytes;
2083 unsigned int lastval;
2084 u8 *u8p;
2085 u16 *u16p;
2086 u32 *u32p;
2087 int ret;
2088 int i;
2089
2090 switch (val_bytes) {
2091 case 1:
2092 u8p = val;
2093 if (write)
2094 lastval = (unsigned int)u8p[val_count - 1];
2095 break;
2096 case 2:
2097 u16p = val;
2098 if (write)
2099 lastval = (unsigned int)u16p[val_count - 1];
2100 break;
2101 case 4:
2102 u32p = val;
2103 if (write)
2104 lastval = (unsigned int)u32p[val_count - 1];
2105 break;
2106 default:
2107 return -EINVAL;
2108 }
2109
2110 /*
2111 * Update the cache with the last value we write, the rest is just
2112 * gone down in the hardware FIFO. We can't cache FIFOs. This makes
2113 * sure a single read from the cache will work.
2114 */
2115 if (write) {
2116 if (!map->cache_bypass && !map->defer_caching) {
2117 ret = regcache_write(map, reg, lastval);
2118 if (ret != 0)
2119 return ret;
2120 if (map->cache_only) {
2121 map->cache_dirty = true;
2122 return 0;
2123 }
2124 }
2125 ret = map->bus->reg_noinc_write(map->bus_context, reg, val, val_count);
2126 } else {
2127 ret = map->bus->reg_noinc_read(map->bus_context, reg, val, val_count);
2128 }
2129
2130 if (!ret && regmap_should_log(map)) {
2131 dev_info(map->dev, "%x %s [", reg, write ? "<=" : "=>");
2132 for (i = 0; i < val_count; i++) {
2133 switch (val_bytes) {
2134 case 1:
2135 pr_cont("%x", u8p[i]);
2136 break;
2137 case 2:
2138 pr_cont("%x", u16p[i]);
2139 break;
2140 case 4:
2141 pr_cont("%x", u32p[i]);
2142 break;
2143 default:
2144 break;
2145 }
2146 if (i == (val_count - 1))
2147 pr_cont("]\n");
2148 else
2149 pr_cont(",");
2150 }
2151 }
2152
2153 return 0;
2154 }
2155
2156 /**
2157 * regmap_noinc_write(): Write data to a register without incrementing the
2158 * register number
2159 *
2160 * @map: Register map to write to
2161 * @reg: Register to write to
2162 * @val: Pointer to data buffer
2163 * @val_len: Length of output buffer in bytes.
2164 *
2165 * The regmap API usually assumes that bulk bus write operations will write a
2166 * range of registers. Some devices have certain registers for which a write
2167 * operation can write to an internal FIFO.
2168 *
2169 * The target register must be volatile but registers after it can be
2170 * completely unrelated cacheable registers.
2171 *
2172 * This will attempt multiple writes as required to write val_len bytes.
2173 *
2174 * A value of zero will be returned on success, a negative errno will be
2175 * returned in error cases.
2176 */
regmap_noinc_write(struct regmap * map,unsigned int reg,const void * val,size_t val_len)2177 int regmap_noinc_write(struct regmap *map, unsigned int reg,
2178 const void *val, size_t val_len)
2179 {
2180 size_t write_len;
2181 int ret;
2182
2183 if (!map->write && !(map->bus && map->bus->reg_noinc_write))
2184 return -EINVAL;
2185 if (val_len % map->format.val_bytes)
2186 return -EINVAL;
2187 if (!IS_ALIGNED(reg, map->reg_stride))
2188 return -EINVAL;
2189 if (val_len == 0)
2190 return -EINVAL;
2191
2192 map->lock(map->lock_arg);
2193
2194 if (!regmap_volatile(map, reg) || !regmap_writeable_noinc(map, reg)) {
2195 ret = -EINVAL;
2196 goto out_unlock;
2197 }
2198
2199 /*
2200 * Use the accelerated operation if we can. The val drops the const
2201 * typing in order to facilitate code reuse in regmap_noinc_readwrite().
2202 */
2203 if (map->bus->reg_noinc_write) {
2204 ret = regmap_noinc_readwrite(map, reg, (void *)val, val_len, true);
2205 goto out_unlock;
2206 }
2207
2208 while (val_len) {
2209 if (map->max_raw_write && map->max_raw_write < val_len)
2210 write_len = map->max_raw_write;
2211 else
2212 write_len = val_len;
2213 ret = _regmap_raw_write(map, reg, val, write_len, true);
2214 if (ret)
2215 goto out_unlock;
2216 val = ((u8 *)val) + write_len;
2217 val_len -= write_len;
2218 }
2219
2220 out_unlock:
2221 map->unlock(map->lock_arg);
2222 return ret;
2223 }
2224 EXPORT_SYMBOL_GPL(regmap_noinc_write);
2225
2226 /**
2227 * regmap_field_update_bits_base() - Perform a read/modify/write cycle a
2228 * register field.
2229 *
2230 * @field: Register field to write to
2231 * @mask: Bitmask to change
2232 * @val: Value to be written
2233 * @change: Boolean indicating if a write was done
2234 * @async: Boolean indicating asynchronously
2235 * @force: Boolean indicating use force update
2236 *
2237 * Perform a read/modify/write cycle on the register field with change,
2238 * async, force option.
2239 *
2240 * A value of zero will be returned on success, a negative errno will
2241 * be returned in error cases.
2242 */
regmap_field_update_bits_base(struct regmap_field * field,unsigned int mask,unsigned int val,bool * change,bool async,bool force)2243 int regmap_field_update_bits_base(struct regmap_field *field,
2244 unsigned int mask, unsigned int val,
2245 bool *change, bool async, bool force)
2246 {
2247 mask = (mask << field->shift) & field->mask;
2248
2249 return regmap_update_bits_base(field->regmap, field->reg,
2250 mask, val << field->shift,
2251 change, async, force);
2252 }
2253 EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
2254
2255 /**
2256 * regmap_field_test_bits() - Check if all specified bits are set in a
2257 * register field.
2258 *
2259 * @field: Register field to operate on
2260 * @bits: Bits to test
2261 *
2262 * Returns -1 if the underlying regmap_field_read() fails, 0 if at least one of the
2263 * tested bits is not set and 1 if all tested bits are set.
2264 */
regmap_field_test_bits(struct regmap_field * field,unsigned int bits)2265 int regmap_field_test_bits(struct regmap_field *field, unsigned int bits)
2266 {
2267 unsigned int val, ret;
2268
2269 ret = regmap_field_read(field, &val);
2270 if (ret)
2271 return ret;
2272
2273 return (val & bits) == bits;
2274 }
2275 EXPORT_SYMBOL_GPL(regmap_field_test_bits);
2276
2277 /**
2278 * regmap_fields_update_bits_base() - Perform a read/modify/write cycle a
2279 * register field with port ID
2280 *
2281 * @field: Register field to write to
2282 * @id: port ID
2283 * @mask: Bitmask to change
2284 * @val: Value to be written
2285 * @change: Boolean indicating if a write was done
2286 * @async: Boolean indicating asynchronously
2287 * @force: Boolean indicating use force update
2288 *
2289 * A value of zero will be returned on success, a negative errno will
2290 * be returned in error cases.
2291 */
regmap_fields_update_bits_base(struct regmap_field * field,unsigned int id,unsigned int mask,unsigned int val,bool * change,bool async,bool force)2292 int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
2293 unsigned int mask, unsigned int val,
2294 bool *change, bool async, bool force)
2295 {
2296 if (id >= field->id_size)
2297 return -EINVAL;
2298
2299 mask = (mask << field->shift) & field->mask;
2300
2301 return regmap_update_bits_base(field->regmap,
2302 field->reg + (field->id_offset * id),
2303 mask, val << field->shift,
2304 change, async, force);
2305 }
2306 EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);
2307
2308 /**
2309 * regmap_bulk_write() - Write multiple registers to the device
2310 *
2311 * @map: Register map to write to
2312 * @reg: First register to be write from
2313 * @val: Block of data to be written, in native register size for device
2314 * @val_count: Number of registers to write
2315 *
2316 * This function is intended to be used for writing a large block of
2317 * data to the device either in single transfer or multiple transfer.
2318 *
2319 * A value of zero will be returned on success, a negative errno will
2320 * be returned in error cases.
2321 */
regmap_bulk_write(struct regmap * map,unsigned int reg,const void * val,size_t val_count)2322 int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
2323 size_t val_count)
2324 {
2325 int ret = 0, i;
2326 size_t val_bytes = map->format.val_bytes;
2327
2328 if (!IS_ALIGNED(reg, map->reg_stride))
2329 return -EINVAL;
2330
2331 /*
2332 * Some devices don't support bulk write, for them we have a series of
2333 * single write operations.
2334 */
2335 if (!map->write || !map->format.parse_inplace) {
2336 map->lock(map->lock_arg);
2337 for (i = 0; i < val_count; i++) {
2338 unsigned int ival;
2339
2340 switch (val_bytes) {
2341 case 1:
2342 ival = *(u8 *)(val + (i * val_bytes));
2343 break;
2344 case 2:
2345 ival = *(u16 *)(val + (i * val_bytes));
2346 break;
2347 case 4:
2348 ival = *(u32 *)(val + (i * val_bytes));
2349 break;
2350 default:
2351 ret = -EINVAL;
2352 goto out;
2353 }
2354
2355 ret = _regmap_write(map,
2356 reg + regmap_get_offset(map, i),
2357 ival);
2358 if (ret != 0)
2359 goto out;
2360 }
2361 out:
2362 map->unlock(map->lock_arg);
2363 } else {
2364 void *wval;
2365
2366 wval = kmemdup_array(val, val_count, val_bytes, map->alloc_flags);
2367 if (!wval)
2368 return -ENOMEM;
2369
2370 for (i = 0; i < val_count * val_bytes; i += val_bytes)
2371 map->format.parse_inplace(wval + i);
2372
2373 ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);
2374
2375 kfree(wval);
2376 }
2377
2378 if (!ret)
2379 trace_regmap_bulk_write(map, reg, val, val_bytes * val_count);
2380
2381 return ret;
2382 }
2383 EXPORT_SYMBOL_GPL(regmap_bulk_write);
2384
2385 /*
2386 * _regmap_raw_multi_reg_write()
2387 *
2388 * the (register,newvalue) pairs in regs have not been formatted, but
2389 * they are all in the same page and have been changed to being page
2390 * relative. The page register has been written if that was necessary.
2391 */
_regmap_raw_multi_reg_write(struct regmap * map,const struct reg_sequence * regs,size_t num_regs)2392 static int _regmap_raw_multi_reg_write(struct regmap *map,
2393 const struct reg_sequence *regs,
2394 size_t num_regs)
2395 {
2396 int ret;
2397 void *buf;
2398 int i;
2399 u8 *u8;
2400 size_t val_bytes = map->format.val_bytes;
2401 size_t reg_bytes = map->format.reg_bytes;
2402 size_t pad_bytes = map->format.pad_bytes;
2403 size_t pair_size = reg_bytes + pad_bytes + val_bytes;
2404 size_t len = pair_size * num_regs;
2405
2406 if (!len)
2407 return -EINVAL;
2408
2409 buf = kzalloc(len, GFP_KERNEL);
2410 if (!buf)
2411 return -ENOMEM;
2412
2413 /* We have to linearise by hand. */
2414
2415 u8 = buf;
2416
2417 for (i = 0; i < num_regs; i++) {
2418 unsigned int reg = regs[i].reg;
2419 unsigned int val = regs[i].def;
2420 trace_regmap_hw_write_start(map, reg, 1);
2421 reg = regmap_reg_addr(map, reg);
2422 map->format.format_reg(u8, reg, map->reg_shift);
2423 u8 += reg_bytes + pad_bytes;
2424 map->format.format_val(u8, val, 0);
2425 u8 += val_bytes;
2426 }
2427 u8 = buf;
2428 *u8 |= map->write_flag_mask;
2429
2430 ret = map->write(map->bus_context, buf, len);
2431
2432 kfree(buf);
2433
2434 for (i = 0; i < num_regs; i++) {
2435 int reg = regs[i].reg;
2436 trace_regmap_hw_write_done(map, reg, 1);
2437 }
2438 return ret;
2439 }
2440
_regmap_register_page(struct regmap * map,unsigned int reg,struct regmap_range_node * range)2441 static unsigned int _regmap_register_page(struct regmap *map,
2442 unsigned int reg,
2443 struct regmap_range_node *range)
2444 {
2445 unsigned int win_page = (reg - range->range_min) / range->window_len;
2446
2447 return win_page;
2448 }
2449
_regmap_range_multi_paged_reg_write(struct regmap * map,struct reg_sequence * regs,size_t num_regs)2450 static int _regmap_range_multi_paged_reg_write(struct regmap *map,
2451 struct reg_sequence *regs,
2452 size_t num_regs)
2453 {
2454 int ret;
2455 int i, n;
2456 struct reg_sequence *base;
2457 unsigned int this_page = 0;
2458 unsigned int page_change = 0;
2459 /*
2460 * the set of registers are not neccessarily in order, but
2461 * since the order of write must be preserved this algorithm
2462 * chops the set each time the page changes. This also applies
2463 * if there is a delay required at any point in the sequence.
2464 */
2465 base = regs;
2466 for (i = 0, n = 0; i < num_regs; i++, n++) {
2467 unsigned int reg = regs[i].reg;
2468 struct regmap_range_node *range;
2469
2470 range = _regmap_range_lookup(map, reg);
2471 if (range) {
2472 unsigned int win_page = _regmap_register_page(map, reg,
2473 range);
2474
2475 if (i == 0)
2476 this_page = win_page;
2477 if (win_page != this_page) {
2478 this_page = win_page;
2479 page_change = 1;
2480 }
2481 }
2482
2483 /* If we have both a page change and a delay make sure to
2484 * write the regs and apply the delay before we change the
2485 * page.
2486 */
2487
2488 if (page_change || regs[i].delay_us) {
2489
2490 /* For situations where the first write requires
2491 * a delay we need to make sure we don't call
2492 * raw_multi_reg_write with n=0
2493 * This can't occur with page breaks as we
2494 * never write on the first iteration
2495 */
2496 if (regs[i].delay_us && i == 0)
2497 n = 1;
2498
2499 ret = _regmap_raw_multi_reg_write(map, base, n);
2500 if (ret != 0)
2501 return ret;
2502
2503 if (regs[i].delay_us) {
2504 if (map->can_sleep)
2505 fsleep(regs[i].delay_us);
2506 else
2507 udelay(regs[i].delay_us);
2508 }
2509
2510 base += n;
2511 n = 0;
2512
2513 if (page_change) {
2514 ret = _regmap_select_page(map,
2515 &base[n].reg,
2516 range, 1);
2517 if (ret != 0)
2518 return ret;
2519
2520 page_change = 0;
2521 }
2522
2523 }
2524
2525 }
2526 if (n > 0)
2527 return _regmap_raw_multi_reg_write(map, base, n);
2528 return 0;
2529 }
2530
_regmap_multi_reg_write(struct regmap * map,const struct reg_sequence * regs,size_t num_regs)2531 static int _regmap_multi_reg_write(struct regmap *map,
2532 const struct reg_sequence *regs,
2533 size_t num_regs)
2534 {
2535 int i;
2536 int ret;
2537
2538 if (!map->can_multi_write) {
2539 for (i = 0; i < num_regs; i++) {
2540 ret = _regmap_write(map, regs[i].reg, regs[i].def);
2541 if (ret != 0)
2542 return ret;
2543
2544 if (regs[i].delay_us) {
2545 if (map->can_sleep)
2546 fsleep(regs[i].delay_us);
2547 else
2548 udelay(regs[i].delay_us);
2549 }
2550 }
2551 return 0;
2552 }
2553
2554 if (!map->format.parse_inplace)
2555 return -EINVAL;
2556
2557 if (map->writeable_reg)
2558 for (i = 0; i < num_regs; i++) {
2559 int reg = regs[i].reg;
2560 if (!map->writeable_reg(map->dev, reg))
2561 return -EINVAL;
2562 if (!IS_ALIGNED(reg, map->reg_stride))
2563 return -EINVAL;
2564 }
2565
2566 if (!map->cache_bypass) {
2567 for (i = 0; i < num_regs; i++) {
2568 unsigned int val = regs[i].def;
2569 unsigned int reg = regs[i].reg;
2570 ret = regcache_write(map, reg, val);
2571 if (ret) {
2572 dev_err(map->dev,
2573 "Error in caching of register: %x ret: %d\n",
2574 reg, ret);
2575 return ret;
2576 }
2577 }
2578 if (map->cache_only) {
2579 map->cache_dirty = true;
2580 return 0;
2581 }
2582 }
2583
2584 WARN_ON(!map->bus);
2585
2586 for (i = 0; i < num_regs; i++) {
2587 unsigned int reg = regs[i].reg;
2588 struct regmap_range_node *range;
2589
2590 /* Coalesce all the writes between a page break or a delay
2591 * in a sequence
2592 */
2593 range = _regmap_range_lookup(map, reg);
2594 if (range || regs[i].delay_us) {
2595 size_t len = sizeof(struct reg_sequence)*num_regs;
2596 struct reg_sequence *base = kmemdup(regs, len,
2597 GFP_KERNEL);
2598 if (!base)
2599 return -ENOMEM;
2600 ret = _regmap_range_multi_paged_reg_write(map, base,
2601 num_regs);
2602 kfree(base);
2603
2604 return ret;
2605 }
2606 }
2607 return _regmap_raw_multi_reg_write(map, regs, num_regs);
2608 }
2609
2610 /**
2611 * regmap_multi_reg_write() - Write multiple registers to the device
2612 *
2613 * @map: Register map to write to
2614 * @regs: Array of structures containing register,value to be written
2615 * @num_regs: Number of registers to write
2616 *
2617 * Write multiple registers to the device where the set of register, value
2618 * pairs are supplied in any order, possibly not all in a single range.
2619 *
2620 * The 'normal' block write mode will send ultimately send data on the
2621 * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
2622 * addressed. However, this alternative block multi write mode will send
2623 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
2624 * must of course support the mode.
2625 *
2626 * A value of zero will be returned on success, a negative errno will be
2627 * returned in error cases.
2628 */
regmap_multi_reg_write(struct regmap * map,const struct reg_sequence * regs,int num_regs)2629 int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
2630 int num_regs)
2631 {
2632 int ret;
2633
2634 map->lock(map->lock_arg);
2635
2636 ret = _regmap_multi_reg_write(map, regs, num_regs);
2637
2638 map->unlock(map->lock_arg);
2639
2640 return ret;
2641 }
2642 EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
2643
2644 /**
2645 * regmap_multi_reg_write_bypassed() - Write multiple registers to the
2646 * device but not the cache
2647 *
2648 * @map: Register map to write to
2649 * @regs: Array of structures containing register,value to be written
2650 * @num_regs: Number of registers to write
2651 *
2652 * Write multiple registers to the device but not the cache where the set
2653 * of register are supplied in any order.
2654 *
2655 * This function is intended to be used for writing a large block of data
2656 * atomically to the device in single transfer for those I2C client devices
2657 * that implement this alternative block write mode.
2658 *
2659 * A value of zero will be returned on success, a negative errno will
2660 * be returned in error cases.
2661 */
regmap_multi_reg_write_bypassed(struct regmap * map,const struct reg_sequence * regs,int num_regs)2662 int regmap_multi_reg_write_bypassed(struct regmap *map,
2663 const struct reg_sequence *regs,
2664 int num_regs)
2665 {
2666 int ret;
2667 bool bypass;
2668
2669 map->lock(map->lock_arg);
2670
2671 bypass = map->cache_bypass;
2672 map->cache_bypass = true;
2673
2674 ret = _regmap_multi_reg_write(map, regs, num_regs);
2675
2676 map->cache_bypass = bypass;
2677
2678 map->unlock(map->lock_arg);
2679
2680 return ret;
2681 }
2682 EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
2683
2684 /**
2685 * regmap_raw_write_async() - Write raw values to one or more registers
2686 * asynchronously
2687 *
2688 * @map: Register map to write to
2689 * @reg: Initial register to write to
2690 * @val: Block of data to be written, laid out for direct transmission to the
2691 * device. Must be valid until regmap_async_complete() is called.
2692 * @val_len: Length of data pointed to by val.
2693 *
2694 * This function is intended to be used for things like firmware
2695 * download where a large block of data needs to be transferred to the
2696 * device. No formatting will be done on the data provided.
2697 *
2698 * If supported by the underlying bus the write will be scheduled
2699 * asynchronously, helping maximise I/O speed on higher speed buses
2700 * like SPI. regmap_async_complete() can be called to ensure that all
2701 * asynchrnous writes have been completed.
2702 *
2703 * A value of zero will be returned on success, a negative errno will
2704 * be returned in error cases.
2705 */
regmap_raw_write_async(struct regmap * map,unsigned int reg,const void * val,size_t val_len)2706 int regmap_raw_write_async(struct regmap *map, unsigned int reg,
2707 const void *val, size_t val_len)
2708 {
2709 int ret;
2710
2711 if (val_len % map->format.val_bytes)
2712 return -EINVAL;
2713 if (!IS_ALIGNED(reg, map->reg_stride))
2714 return -EINVAL;
2715
2716 map->lock(map->lock_arg);
2717
2718 map->async = true;
2719
2720 ret = _regmap_raw_write(map, reg, val, val_len, false);
2721
2722 map->async = false;
2723
2724 map->unlock(map->lock_arg);
2725
2726 return ret;
2727 }
2728 EXPORT_SYMBOL_GPL(regmap_raw_write_async);
2729
_regmap_raw_read(struct regmap * map,unsigned int reg,void * val,unsigned int val_len,bool noinc)2730 static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
2731 unsigned int val_len, bool noinc)
2732 {
2733 struct regmap_range_node *range;
2734 int ret;
2735
2736 if (!map->read)
2737 return -EINVAL;
2738
2739 range = _regmap_range_lookup(map, reg);
2740 if (range) {
2741 ret = _regmap_select_page(map, ®, range,
2742 noinc ? 1 : val_len / map->format.val_bytes);
2743 if (ret != 0)
2744 return ret;
2745 }
2746
2747 reg = regmap_reg_addr(map, reg);
2748 map->format.format_reg(map->work_buf, reg, map->reg_shift);
2749 regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
2750 map->read_flag_mask);
2751 trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);
2752
2753 ret = map->read(map->bus_context, map->work_buf,
2754 map->format.reg_bytes + map->format.pad_bytes,
2755 val, val_len);
2756
2757 trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);
2758
2759 return ret;
2760 }
2761
_regmap_bus_reg_read(void * context,unsigned int reg,unsigned int * val)2762 static int _regmap_bus_reg_read(void *context, unsigned int reg,
2763 unsigned int *val)
2764 {
2765 struct regmap *map = context;
2766 struct regmap_range_node *range;
2767 int ret;
2768
2769 range = _regmap_range_lookup(map, reg);
2770 if (range) {
2771 ret = _regmap_select_page(map, ®, range, 1);
2772 if (ret != 0)
2773 return ret;
2774 }
2775
2776 reg = regmap_reg_addr(map, reg);
2777 return map->bus->reg_read(map->bus_context, reg, val);
2778 }
2779
_regmap_bus_read(void * context,unsigned int reg,unsigned int * val)2780 static int _regmap_bus_read(void *context, unsigned int reg,
2781 unsigned int *val)
2782 {
2783 int ret;
2784 struct regmap *map = context;
2785 void *work_val = map->work_buf + map->format.reg_bytes +
2786 map->format.pad_bytes;
2787
2788 if (!map->format.parse_val)
2789 return -EINVAL;
2790
2791 ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false);
2792 if (ret == 0)
2793 *val = map->format.parse_val(work_val);
2794
2795 return ret;
2796 }
2797
_regmap_read(struct regmap * map,unsigned int reg,unsigned int * val)2798 static int _regmap_read(struct regmap *map, unsigned int reg,
2799 unsigned int *val)
2800 {
2801 int ret;
2802 void *context = _regmap_map_get_context(map);
2803
2804 if (!map->cache_bypass) {
2805 ret = regcache_read(map, reg, val);
2806 if (ret == 0)
2807 return 0;
2808 }
2809
2810 if (map->cache_only)
2811 return -EBUSY;
2812
2813 if (!regmap_readable(map, reg))
2814 return -EIO;
2815
2816 ret = map->reg_read(context, reg, val);
2817 if (ret == 0) {
2818 if (regmap_should_log(map))
2819 dev_info(map->dev, "%x => %x\n", reg, *val);
2820
2821 trace_regmap_reg_read(map, reg, *val);
2822
2823 if (!map->cache_bypass)
2824 regcache_write(map, reg, *val);
2825 }
2826
2827 return ret;
2828 }
2829
2830 /**
2831 * regmap_read() - Read a value from a single register
2832 *
2833 * @map: Register map to read from
2834 * @reg: Register to be read from
2835 * @val: Pointer to store read value
2836 *
2837 * A value of zero will be returned on success, a negative errno will
2838 * be returned in error cases.
2839 */
regmap_read(struct regmap * map,unsigned int reg,unsigned int * val)2840 int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
2841 {
2842 int ret;
2843
2844 if (!IS_ALIGNED(reg, map->reg_stride))
2845 return -EINVAL;
2846
2847 map->lock(map->lock_arg);
2848
2849 ret = _regmap_read(map, reg, val);
2850
2851 map->unlock(map->lock_arg);
2852
2853 return ret;
2854 }
2855 EXPORT_SYMBOL_GPL(regmap_read);
2856
2857 /**
2858 * regmap_read_bypassed() - Read a value from a single register direct
2859 * from the device, bypassing the cache
2860 *
2861 * @map: Register map to read from
2862 * @reg: Register to be read from
2863 * @val: Pointer to store read value
2864 *
2865 * A value of zero will be returned on success, a negative errno will
2866 * be returned in error cases.
2867 */
regmap_read_bypassed(struct regmap * map,unsigned int reg,unsigned int * val)2868 int regmap_read_bypassed(struct regmap *map, unsigned int reg, unsigned int *val)
2869 {
2870 int ret;
2871 bool bypass, cache_only;
2872
2873 if (!IS_ALIGNED(reg, map->reg_stride))
2874 return -EINVAL;
2875
2876 map->lock(map->lock_arg);
2877
2878 bypass = map->cache_bypass;
2879 cache_only = map->cache_only;
2880 map->cache_bypass = true;
2881 map->cache_only = false;
2882
2883 ret = _regmap_read(map, reg, val);
2884
2885 map->cache_bypass = bypass;
2886 map->cache_only = cache_only;
2887
2888 map->unlock(map->lock_arg);
2889
2890 return ret;
2891 }
2892 EXPORT_SYMBOL_GPL(regmap_read_bypassed);
2893
2894 /**
2895 * regmap_raw_read() - Read raw data from the device
2896 *
2897 * @map: Register map to read from
2898 * @reg: First register to be read from
2899 * @val: Pointer to store read value
2900 * @val_len: Size of data to read
2901 *
2902 * A value of zero will be returned on success, a negative errno will
2903 * be returned in error cases.
2904 */
regmap_raw_read(struct regmap * map,unsigned int reg,void * val,size_t val_len)2905 int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
2906 size_t val_len)
2907 {
2908 size_t val_bytes = map->format.val_bytes;
2909 size_t val_count = val_len / val_bytes;
2910 unsigned int v;
2911 int ret, i;
2912
2913 if (val_len % map->format.val_bytes)
2914 return -EINVAL;
2915 if (!IS_ALIGNED(reg, map->reg_stride))
2916 return -EINVAL;
2917 if (val_count == 0)
2918 return -EINVAL;
2919
2920 map->lock(map->lock_arg);
2921
2922 if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
2923 map->cache_type == REGCACHE_NONE) {
2924 size_t chunk_count, chunk_bytes;
2925 size_t chunk_regs = val_count;
2926
2927 if (!map->cache_bypass && map->cache_only) {
2928 ret = -EBUSY;
2929 goto out;
2930 }
2931
2932 if (!map->read) {
2933 ret = -ENOTSUPP;
2934 goto out;
2935 }
2936
2937 if (map->use_single_read)
2938 chunk_regs = 1;
2939 else if (map->max_raw_read && val_len > map->max_raw_read)
2940 chunk_regs = map->max_raw_read / val_bytes;
2941
2942 chunk_count = val_count / chunk_regs;
2943 chunk_bytes = chunk_regs * val_bytes;
2944
2945 /* Read bytes that fit into whole chunks */
2946 for (i = 0; i < chunk_count; i++) {
2947 ret = _regmap_raw_read(map, reg, val, chunk_bytes, false);
2948 if (ret != 0)
2949 goto out;
2950
2951 reg += regmap_get_offset(map, chunk_regs);
2952 val += chunk_bytes;
2953 val_len -= chunk_bytes;
2954 }
2955
2956 /* Read remaining bytes */
2957 if (val_len) {
2958 ret = _regmap_raw_read(map, reg, val, val_len, false);
2959 if (ret != 0)
2960 goto out;
2961 }
2962 } else {
2963 /* Otherwise go word by word for the cache; should be low
2964 * cost as we expect to hit the cache.
2965 */
2966 for (i = 0; i < val_count; i++) {
2967 ret = _regmap_read(map, reg + regmap_get_offset(map, i),
2968 &v);
2969 if (ret != 0)
2970 goto out;
2971
2972 map->format.format_val(val + (i * val_bytes), v, 0);
2973 }
2974 }
2975
2976 out:
2977 map->unlock(map->lock_arg);
2978
2979 return ret;
2980 }
2981 EXPORT_SYMBOL_GPL(regmap_raw_read);
2982
2983 /**
2984 * regmap_noinc_read(): Read data from a register without incrementing the
2985 * register number
2986 *
2987 * @map: Register map to read from
2988 * @reg: Register to read from
2989 * @val: Pointer to data buffer
2990 * @val_len: Length of output buffer in bytes.
2991 *
2992 * The regmap API usually assumes that bulk read operations will read a
2993 * range of registers. Some devices have certain registers for which a read
2994 * operation read will read from an internal FIFO.
2995 *
2996 * The target register must be volatile but registers after it can be
2997 * completely unrelated cacheable registers.
2998 *
2999 * This will attempt multiple reads as required to read val_len bytes.
3000 *
3001 * A value of zero will be returned on success, a negative errno will be
3002 * returned in error cases.
3003 */
regmap_noinc_read(struct regmap * map,unsigned int reg,void * val,size_t val_len)3004 int regmap_noinc_read(struct regmap *map, unsigned int reg,
3005 void *val, size_t val_len)
3006 {
3007 size_t read_len;
3008 int ret;
3009
3010 if (!map->read)
3011 return -ENOTSUPP;
3012
3013 if (val_len % map->format.val_bytes)
3014 return -EINVAL;
3015 if (!IS_ALIGNED(reg, map->reg_stride))
3016 return -EINVAL;
3017 if (val_len == 0)
3018 return -EINVAL;
3019
3020 map->lock(map->lock_arg);
3021
3022 if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) {
3023 ret = -EINVAL;
3024 goto out_unlock;
3025 }
3026
3027 /*
3028 * We have not defined the FIFO semantics for cache, as the
3029 * cache is just one value deep. Should we return the last
3030 * written value? Just avoid this by always reading the FIFO
3031 * even when using cache. Cache only will not work.
3032 */
3033 if (!map->cache_bypass && map->cache_only) {
3034 ret = -EBUSY;
3035 goto out_unlock;
3036 }
3037
3038 /* Use the accelerated operation if we can */
3039 if (map->bus->reg_noinc_read) {
3040 ret = regmap_noinc_readwrite(map, reg, val, val_len, false);
3041 goto out_unlock;
3042 }
3043
3044 while (val_len) {
3045 if (map->max_raw_read && map->max_raw_read < val_len)
3046 read_len = map->max_raw_read;
3047 else
3048 read_len = val_len;
3049 ret = _regmap_raw_read(map, reg, val, read_len, true);
3050 if (ret)
3051 goto out_unlock;
3052 val = ((u8 *)val) + read_len;
3053 val_len -= read_len;
3054 }
3055
3056 out_unlock:
3057 map->unlock(map->lock_arg);
3058 return ret;
3059 }
3060 EXPORT_SYMBOL_GPL(regmap_noinc_read);
3061
3062 /**
3063 * regmap_field_read(): Read a value to a single register field
3064 *
3065 * @field: Register field to read from
3066 * @val: Pointer to store read value
3067 *
3068 * A value of zero will be returned on success, a negative errno will
3069 * be returned in error cases.
3070 */
regmap_field_read(struct regmap_field * field,unsigned int * val)3071 int regmap_field_read(struct regmap_field *field, unsigned int *val)
3072 {
3073 int ret;
3074 unsigned int reg_val;
3075 ret = regmap_read(field->regmap, field->reg, ®_val);
3076 if (ret != 0)
3077 return ret;
3078
3079 reg_val &= field->mask;
3080 reg_val >>= field->shift;
3081 *val = reg_val;
3082
3083 return ret;
3084 }
3085 EXPORT_SYMBOL_GPL(regmap_field_read);
3086
3087 /**
3088 * regmap_fields_read() - Read a value to a single register field with port ID
3089 *
3090 * @field: Register field to read from
3091 * @id: port ID
3092 * @val: Pointer to store read value
3093 *
3094 * A value of zero will be returned on success, a negative errno will
3095 * be returned in error cases.
3096 */
regmap_fields_read(struct regmap_field * field,unsigned int id,unsigned int * val)3097 int regmap_fields_read(struct regmap_field *field, unsigned int id,
3098 unsigned int *val)
3099 {
3100 int ret;
3101 unsigned int reg_val;
3102
3103 if (id >= field->id_size)
3104 return -EINVAL;
3105
3106 ret = regmap_read(field->regmap,
3107 field->reg + (field->id_offset * id),
3108 ®_val);
3109 if (ret != 0)
3110 return ret;
3111
3112 reg_val &= field->mask;
3113 reg_val >>= field->shift;
3114 *val = reg_val;
3115
3116 return ret;
3117 }
3118 EXPORT_SYMBOL_GPL(regmap_fields_read);
3119
_regmap_bulk_read(struct regmap * map,unsigned int reg,unsigned int * regs,void * val,size_t val_count)3120 static int _regmap_bulk_read(struct regmap *map, unsigned int reg,
3121 unsigned int *regs, void *val, size_t val_count)
3122 {
3123 u32 *u32 = val;
3124 u16 *u16 = val;
3125 u8 *u8 = val;
3126 int ret, i;
3127
3128 map->lock(map->lock_arg);
3129
3130 for (i = 0; i < val_count; i++) {
3131 unsigned int ival;
3132
3133 if (regs) {
3134 if (!IS_ALIGNED(regs[i], map->reg_stride)) {
3135 ret = -EINVAL;
3136 goto out;
3137 }
3138 ret = _regmap_read(map, regs[i], &ival);
3139 } else {
3140 ret = _regmap_read(map, reg + regmap_get_offset(map, i), &ival);
3141 }
3142 if (ret != 0)
3143 goto out;
3144
3145 switch (map->format.val_bytes) {
3146 case 4:
3147 u32[i] = ival;
3148 break;
3149 case 2:
3150 u16[i] = ival;
3151 break;
3152 case 1:
3153 u8[i] = ival;
3154 break;
3155 default:
3156 ret = -EINVAL;
3157 goto out;
3158 }
3159 }
3160 out:
3161 map->unlock(map->lock_arg);
3162 return ret;
3163 }
3164
3165 /**
3166 * regmap_bulk_read() - Read multiple sequential registers from the device
3167 *
3168 * @map: Register map to read from
3169 * @reg: First register to be read from
3170 * @val: Pointer to store read value, in native register size for device
3171 * @val_count: Number of registers to read
3172 *
3173 * A value of zero will be returned on success, a negative errno will
3174 * be returned in error cases.
3175 */
regmap_bulk_read(struct regmap * map,unsigned int reg,void * val,size_t val_count)3176 int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
3177 size_t val_count)
3178 {
3179 int ret, i;
3180 size_t val_bytes = map->format.val_bytes;
3181 bool vol = regmap_volatile_range(map, reg, val_count);
3182
3183 if (!IS_ALIGNED(reg, map->reg_stride))
3184 return -EINVAL;
3185 if (val_count == 0)
3186 return -EINVAL;
3187
3188 if (map->read && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
3189 ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
3190 if (ret != 0)
3191 return ret;
3192
3193 for (i = 0; i < val_count * val_bytes; i += val_bytes)
3194 map->format.parse_inplace(val + i);
3195 } else {
3196 ret = _regmap_bulk_read(map, reg, NULL, val, val_count);
3197 }
3198 if (!ret)
3199 trace_regmap_bulk_read(map, reg, val, val_bytes * val_count);
3200 return ret;
3201 }
3202 EXPORT_SYMBOL_GPL(regmap_bulk_read);
3203
3204 /**
3205 * regmap_multi_reg_read() - Read multiple non-sequential registers from the device
3206 *
3207 * @map: Register map to read from
3208 * @regs: Array of registers to read from
3209 * @val: Pointer to store read value, in native register size for device
3210 * @val_count: Number of registers to read
3211 *
3212 * A value of zero will be returned on success, a negative errno will
3213 * be returned in error cases.
3214 */
regmap_multi_reg_read(struct regmap * map,unsigned int * regs,void * val,size_t val_count)3215 int regmap_multi_reg_read(struct regmap *map, unsigned int *regs, void *val,
3216 size_t val_count)
3217 {
3218 if (val_count == 0)
3219 return -EINVAL;
3220
3221 return _regmap_bulk_read(map, 0, regs, val, val_count);
3222 }
3223 EXPORT_SYMBOL_GPL(regmap_multi_reg_read);
3224
_regmap_update_bits(struct regmap * map,unsigned int reg,unsigned int mask,unsigned int val,bool * change,bool force_write)3225 static int _regmap_update_bits(struct regmap *map, unsigned int reg,
3226 unsigned int mask, unsigned int val,
3227 bool *change, bool force_write)
3228 {
3229 int ret;
3230 unsigned int tmp, orig;
3231
3232 if (change)
3233 *change = false;
3234
3235 if (regmap_volatile(map, reg) && map->reg_update_bits) {
3236 reg = regmap_reg_addr(map, reg);
3237 ret = map->reg_update_bits(map->bus_context, reg, mask, val);
3238 if (ret == 0 && change)
3239 *change = true;
3240 } else {
3241 ret = _regmap_read(map, reg, &orig);
3242 if (ret != 0)
3243 return ret;
3244
3245 tmp = orig & ~mask;
3246 tmp |= val & mask;
3247
3248 if (force_write || (tmp != orig) || map->force_write_field) {
3249 ret = _regmap_write(map, reg, tmp);
3250 if (ret == 0 && change)
3251 *change = true;
3252 }
3253 }
3254
3255 return ret;
3256 }
3257
3258 /**
3259 * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
3260 *
3261 * @map: Register map to update
3262 * @reg: Register to update
3263 * @mask: Bitmask to change
3264 * @val: New value for bitmask
3265 * @change: Boolean indicating if a write was done
3266 * @async: Boolean indicating asynchronously
3267 * @force: Boolean indicating use force update
3268 *
3269 * Perform a read/modify/write cycle on a register map with change, async, force
3270 * options.
3271 *
3272 * If async is true:
3273 *
3274 * With most buses the read must be done synchronously so this is most useful
3275 * for devices with a cache which do not need to interact with the hardware to
3276 * determine the current register value.
3277 *
3278 * Returns zero for success, a negative number on error.
3279 */
regmap_update_bits_base(struct regmap * map,unsigned int reg,unsigned int mask,unsigned int val,bool * change,bool async,bool force)3280 int regmap_update_bits_base(struct regmap *map, unsigned int reg,
3281 unsigned int mask, unsigned int val,
3282 bool *change, bool async, bool force)
3283 {
3284 int ret;
3285
3286 map->lock(map->lock_arg);
3287
3288 map->async = async;
3289
3290 ret = _regmap_update_bits(map, reg, mask, val, change, force);
3291
3292 map->async = false;
3293
3294 map->unlock(map->lock_arg);
3295
3296 return ret;
3297 }
3298 EXPORT_SYMBOL_GPL(regmap_update_bits_base);
3299
3300 /**
3301 * regmap_test_bits() - Check if all specified bits are set in a register.
3302 *
3303 * @map: Register map to operate on
3304 * @reg: Register to read from
3305 * @bits: Bits to test
3306 *
3307 * Returns 0 if at least one of the tested bits is not set, 1 if all tested
3308 * bits are set and a negative error number if the underlying regmap_read()
3309 * fails.
3310 */
regmap_test_bits(struct regmap * map,unsigned int reg,unsigned int bits)3311 int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits)
3312 {
3313 unsigned int val, ret;
3314
3315 ret = regmap_read(map, reg, &val);
3316 if (ret)
3317 return ret;
3318
3319 return (val & bits) == bits;
3320 }
3321 EXPORT_SYMBOL_GPL(regmap_test_bits);
3322
regmap_async_complete_cb(struct regmap_async * async,int ret)3323 void regmap_async_complete_cb(struct regmap_async *async, int ret)
3324 {
3325 struct regmap *map = async->map;
3326 bool wake;
3327
3328 trace_regmap_async_io_complete(map);
3329
3330 spin_lock(&map->async_lock);
3331 list_move(&async->list, &map->async_free);
3332 wake = list_empty(&map->async_list);
3333
3334 if (ret != 0)
3335 map->async_ret = ret;
3336
3337 spin_unlock(&map->async_lock);
3338
3339 if (wake)
3340 wake_up(&map->async_waitq);
3341 }
3342 EXPORT_SYMBOL_GPL(regmap_async_complete_cb);
3343
regmap_async_is_done(struct regmap * map)3344 static int regmap_async_is_done(struct regmap *map)
3345 {
3346 unsigned long flags;
3347 int ret;
3348
3349 spin_lock_irqsave(&map->async_lock, flags);
3350 ret = list_empty(&map->async_list);
3351 spin_unlock_irqrestore(&map->async_lock, flags);
3352
3353 return ret;
3354 }
3355
3356 /**
3357 * regmap_async_complete - Ensure all asynchronous I/O has completed.
3358 *
3359 * @map: Map to operate on.
3360 *
3361 * Blocks until any pending asynchronous I/O has completed. Returns
3362 * an error code for any failed I/O operations.
3363 */
regmap_async_complete(struct regmap * map)3364 int regmap_async_complete(struct regmap *map)
3365 {
3366 unsigned long flags;
3367 int ret;
3368
3369 /* Nothing to do with no async support */
3370 if (!map->bus || !map->bus->async_write)
3371 return 0;
3372
3373 trace_regmap_async_complete_start(map);
3374
3375 wait_event(map->async_waitq, regmap_async_is_done(map));
3376
3377 spin_lock_irqsave(&map->async_lock, flags);
3378 ret = map->async_ret;
3379 map->async_ret = 0;
3380 spin_unlock_irqrestore(&map->async_lock, flags);
3381
3382 trace_regmap_async_complete_done(map);
3383
3384 return ret;
3385 }
3386 EXPORT_SYMBOL_GPL(regmap_async_complete);
3387
3388 /**
3389 * regmap_register_patch - Register and apply register updates to be applied
3390 * on device initialistion
3391 *
3392 * @map: Register map to apply updates to.
3393 * @regs: Values to update.
3394 * @num_regs: Number of entries in regs.
3395 *
3396 * Register a set of register updates to be applied to the device
3397 * whenever the device registers are synchronised with the cache and
3398 * apply them immediately. Typically this is used to apply
3399 * corrections to be applied to the device defaults on startup, such
3400 * as the updates some vendors provide to undocumented registers.
3401 *
3402 * The caller must ensure that this function cannot be called
3403 * concurrently with either itself or regcache_sync().
3404 */
regmap_register_patch(struct regmap * map,const struct reg_sequence * regs,int num_regs)3405 int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
3406 int num_regs)
3407 {
3408 struct reg_sequence *p;
3409 int ret;
3410 bool bypass;
3411
3412 if (WARN_ONCE(num_regs <= 0, "invalid registers number (%d)\n",
3413 num_regs))
3414 return 0;
3415
3416 p = krealloc(map->patch,
3417 sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
3418 GFP_KERNEL);
3419 if (p) {
3420 memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
3421 map->patch = p;
3422 map->patch_regs += num_regs;
3423 } else {
3424 return -ENOMEM;
3425 }
3426
3427 map->lock(map->lock_arg);
3428
3429 bypass = map->cache_bypass;
3430
3431 map->cache_bypass = true;
3432 map->async = true;
3433
3434 ret = _regmap_multi_reg_write(map, regs, num_regs);
3435
3436 map->async = false;
3437 map->cache_bypass = bypass;
3438
3439 map->unlock(map->lock_arg);
3440
3441 regmap_async_complete(map);
3442
3443 return ret;
3444 }
3445 EXPORT_SYMBOL_GPL(regmap_register_patch);
3446
3447 /**
3448 * regmap_get_val_bytes() - Report the size of a register value
3449 *
3450 * @map: Register map to operate on.
3451 *
3452 * Report the size of a register value, mainly intended to for use by
3453 * generic infrastructure built on top of regmap.
3454 */
regmap_get_val_bytes(struct regmap * map)3455 int regmap_get_val_bytes(struct regmap *map)
3456 {
3457 if (map->format.format_write)
3458 return -EINVAL;
3459
3460 return map->format.val_bytes;
3461 }
3462 EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
3463
3464 /**
3465 * regmap_get_max_register() - Report the max register value
3466 *
3467 * @map: Register map to operate on.
3468 *
3469 * Report the max register value, mainly intended to for use by
3470 * generic infrastructure built on top of regmap.
3471 */
regmap_get_max_register(struct regmap * map)3472 int regmap_get_max_register(struct regmap *map)
3473 {
3474 return map->max_register_is_set ? map->max_register : -EINVAL;
3475 }
3476 EXPORT_SYMBOL_GPL(regmap_get_max_register);
3477
3478 /**
3479 * regmap_get_reg_stride() - Report the register address stride
3480 *
3481 * @map: Register map to operate on.
3482 *
3483 * Report the register address stride, mainly intended to for use by
3484 * generic infrastructure built on top of regmap.
3485 */
regmap_get_reg_stride(struct regmap * map)3486 int regmap_get_reg_stride(struct regmap *map)
3487 {
3488 return map->reg_stride;
3489 }
3490 EXPORT_SYMBOL_GPL(regmap_get_reg_stride);
3491
3492 /**
3493 * regmap_might_sleep() - Returns whether a regmap access might sleep.
3494 *
3495 * @map: Register map to operate on.
3496 *
3497 * Returns true if an access to the register might sleep, else false.
3498 */
regmap_might_sleep(struct regmap * map)3499 bool regmap_might_sleep(struct regmap *map)
3500 {
3501 return map->can_sleep;
3502 }
3503 EXPORT_SYMBOL_GPL(regmap_might_sleep);
3504
regmap_parse_val(struct regmap * map,const void * buf,unsigned int * val)3505 int regmap_parse_val(struct regmap *map, const void *buf,
3506 unsigned int *val)
3507 {
3508 if (!map->format.parse_val)
3509 return -EINVAL;
3510
3511 *val = map->format.parse_val(buf);
3512
3513 return 0;
3514 }
3515 EXPORT_SYMBOL_GPL(regmap_parse_val);
3516
regmap_initcall(void)3517 static int __init regmap_initcall(void)
3518 {
3519 regmap_debugfs_initcall();
3520
3521 return 0;
3522 }
3523 postcore_initcall(regmap_initcall);
3524