/*
 * Register cache access API
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/device.h>
#include <trace/events/regmap.h>
#include <linux/bsearch.h>
#include <linux/sort.h>

#include "internal.h"

static const struct regcache_ops *cache_types[] = {
	&regcache_rbtree_ops,
	&regcache_lzo_ops,
	&regcache_flat_ops,
};

static int regcache_hw_init(struct regmap *map)
{
	int i, j;
	int ret;
	int count;
	unsigned int val;
	void *tmp_buf;

	if (!map->num_reg_defaults_raw)
		return -EINVAL;

	if (!map->reg_defaults_raw) {
		u32 cache_bypass = map->cache_bypass;
		dev_warn(map->dev, "No cache defaults, reading back from HW\n");

		/* Bypass the cache while the defaults are read from the HW */
		map->cache_bypass = 1;
		tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
		if (!tmp_buf)
			return -ENOMEM;
		ret = regmap_raw_read(map, 0, tmp_buf,
				      map->cache_size_raw);
		map->cache_bypass = cache_bypass;
		if (ret < 0) {
			kfree(tmp_buf);
			return ret;
		}
		map->reg_defaults_raw = tmp_buf;
		map->cache_free = 1;
	}

	/* calculate the size of reg_defaults */
	for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) {
		val = regcache_get_val(map, map->reg_defaults_raw, i);
		if (regmap_volatile(map, i * map->reg_stride))
			continue;
		count++;
	}

	map->reg_defaults = kmalloc(count * sizeof(struct reg_default),
				      GFP_KERNEL);
	if (!map->reg_defaults) {
		ret = -ENOMEM;
		goto err_free;
	}

	/* fill the reg_defaults */
	map->num_reg_defaults = count;
	for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
		val = regcache_get_val(map, map->reg_defaults_raw, i);
		if (regmap_volatile(map, i * map->reg_stride))
			continue;
		map->reg_defaults[j].reg = i * map->reg_stride;
		map->reg_defaults[j].def = val;
		j++;
	}

	return 0;

err_free:
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	return ret;
}

int regcache_init(struct regmap *map, const struct regmap_config *config)
{
	int ret;
	int i;
	void *tmp_buf;

	for (i = 0; i < config->num_reg_defaults; i++)
		if (config->reg_defaults[i].reg % map->reg_stride)
			return -EINVAL;

	if (map->cache_type == REGCACHE_NONE) {
		map->cache_bypass = true;
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(cache_types); i++)
		if (cache_types[i]->type == map->cache_type)
			break;

	if (i == ARRAY_SIZE(cache_types)) {
		dev_err(map->dev, "Could not match cache type: %d\n",
			map->cache_type);
		return -EINVAL;
	}

	map->num_reg_defaults = config->num_reg_defaults;
	map->num_reg_defaults_raw = config->num_reg_defaults_raw;
	map->reg_defaults_raw = config->reg_defaults_raw;
	map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
	map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;
	map->cache_present = NULL;
	map->cache_present_nbits = 0;

	map->cache = NULL;
	map->cache_ops = cache_types[i];

	if (!map->cache_ops->read ||
	    !map->cache_ops->write ||
	    !map->cache_ops->name)
		return -EINVAL;

	/* Make a copy of the reg_defaults so that they can't
	 * vanish from under us if the caller's copy goes away.
	 */
	if (config->reg_defaults) {
		if (!map->num_reg_defaults)
			return -EINVAL;
		tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults *
				  sizeof(struct reg_default), GFP_KERNEL);
		if (!tmp_buf)
			return -ENOMEM;
		map->reg_defaults = tmp_buf;
	} else if (map->num_reg_defaults_raw) {
		/* Some devices such as PMICs don't have cache defaults;
		 * we cope with this by reading back the HW registers and
		 * crafting the cache defaults by hand.
		 */
		ret = regcache_hw_init(map);
		if (ret < 0)
			return ret;
	}

	if (!map->max_register)
		map->max_register = map->num_reg_defaults_raw;

	if (map->cache_ops->init) {
		dev_dbg(map->dev, "Initializing %s cache\n",
			map->cache_ops->name);
		ret = map->cache_ops->init(map);
		if (ret)
			goto err_free;
	}
	return 0;

err_free:
	kfree(map->reg_defaults);
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	return ret;
}

void regcache_exit(struct regmap *map)
{
	if (map->cache_type == REGCACHE_NONE)
		return;

	BUG_ON(!map->cache_ops);

	kfree(map->cache_present);
	kfree(map->reg_defaults);
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	if (map->cache_ops->exit) {
		dev_dbg(map->dev, "Destroying %s cache\n",
			map->cache_ops->name);
		map->cache_ops->exit(map);
	}
}

/**
 * regcache_read: Fetch the value of a given register from the cache.
 *
 * @map: map to configure.
 * @reg: The register index.
 * @value: The value to be returned.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_read(struct regmap *map,
		  unsigned int reg, unsigned int *value)
{
	int ret;

	if (map->cache_type == REGCACHE_NONE)
		return -ENOSYS;

	BUG_ON(!map->cache_ops);

	if (!regmap_volatile(map, reg)) {
		ret = map->cache_ops->read(map, reg, value);

		if (ret == 0)
			trace_regmap_reg_read_cache(map->dev, reg, *value);

		return ret;
	}

	return -EINVAL;
}

/**
 * regcache_write: Set the value of a given register in the cache.
 *
 * @map: map to configure.
 * @reg: The register index.
 * @value: The new register value.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_write(struct regmap *map,
		   unsigned int reg, unsigned int value)
{
	if (map->cache_type == REGCACHE_NONE)
		return 0;

	BUG_ON(!map->cache_ops);

	if (!regmap_writeable(map, reg))
		return -EIO;

	if (!regmap_volatile(map, reg))
		return map->cache_ops->write(map, reg, value);

	return 0;
}

/**
 * regcache_sync: Sync the register cache with the hardware.
 *
 * @map: map to configure.
 *
 * Any registers that should not be synced should be marked as
 * volatile.  In general drivers can choose not to use the provided
 * syncing functionality if they so require.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync(struct regmap *map)
{
	int ret = 0;
	unsigned int i;
	const char *name;
	unsigned int bypass;

	BUG_ON(!map->cache_ops || !map->cache_ops->sync);

	map->lock(map->lock_arg);
	/* Remember the initial bypass state */
	bypass = map->cache_bypass;
	dev_dbg(map->dev, "Syncing %s cache\n",
		map->cache_ops->name);
	name = map->cache_ops->name;
	trace_regcache_sync(map->dev, name, "start");

	if (!map->cache_dirty)
		goto out;

	/* Apply any patch first */
	map->cache_bypass = 1;
	for (i = 0; i < map->patch_regs; i++) {
		if (map->patch[i].reg % map->reg_stride) {
			ret = -EINVAL;
			goto out;
		}
		ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
		if (ret != 0) {
			dev_err(map->dev, "Failed to write %x = %x: %d\n",
				map->patch[i].reg, map->patch[i].def, ret);
			goto out;
		}
	}
	map->cache_bypass = 0;

	ret = map->cache_ops->sync(map, 0, map->max_register);

	if (ret == 0)
		map->cache_dirty = false;

out:
	trace_regcache_sync(map->dev, name, "stop");
	/* Restore the bypass state */
	map->cache_bypass = bypass;
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync);
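
/*
 * Illustrative usage sketch only, not part of the regmap core: a driver
 * that powers its device down across suspend typically enters cache-only
 * mode and marks the cache dirty on the way down, then leaves cache-only
 * mode and syncs on the way back up.  "example_chip" and its field names
 * are hypothetical; only the regcache_*() calls are real API.
 */
struct example_chip {
	struct regmap *regmap;
};

static int __maybe_unused example_chip_suspend(struct example_chip *chip)
{
	/* Queue further register writes in the cache while the device is off */
	regcache_cache_only(chip->regmap, true);
	/* Ensure the next regcache_sync() is not suppressed */
	regcache_mark_dirty(chip->regmap);
	return 0;
}

static int __maybe_unused example_chip_resume(struct example_chip *chip)
{
	/* Allow register I/O to reach the hardware again */
	regcache_cache_only(chip->regmap, false);
	/* Replay non-default, non-volatile register values */
	return regcache_sync(chip->regmap);
}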

/**
 * regcache_sync_region: Sync part of the register cache with the hardware.
 *
 * @map: map to sync.
 * @min: first register to sync
 * @max: last register to sync
 *
 * Write all non-default register values in the specified region to
 * the hardware.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync_region(struct regmap *map, unsigned int min,
			 unsigned int max)
{
	int ret = 0;
	const char *name;
	unsigned int bypass;

	BUG_ON(!map->cache_ops || !map->cache_ops->sync);

	map->lock(map->lock_arg);

	/* Remember the initial bypass state */
	bypass = map->cache_bypass;

	name = map->cache_ops->name;
	dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);

	trace_regcache_sync(map->dev, name, "start region");

	if (!map->cache_dirty)
		goto out;

	ret = map->cache_ops->sync(map, min, max);

out:
	trace_regcache_sync(map->dev, name, "stop region");
	/* Restore the bypass state */
	map->cache_bypass = bypass;
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync_region);
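
/*
 * Illustrative usage sketch only: when just one block of the device (say,
 * a single power domain) has been reset, a driver can restore that window
 * alone instead of replaying the whole map.  The register range below is
 * a hypothetical example; regcache_sync_region() is the real API.
 */
static int __maybe_unused example_restore_adc_block(struct regmap *regmap)
{
	/* Hypothetical ADC register window 0x30..0x3f */
	return regcache_sync_region(regmap, 0x30, 0x3f);
}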

/**
 * regcache_cache_only: Put a register map into cache only mode
 *
 * @map: map to configure
 * @enable: flag if changes should be written to the hardware
 *
 * When a register map is marked as cache only, writes to the register
 * map API will only update the register cache; they will not cause
 * any hardware changes.  This is useful for allowing portions of
 * drivers to act as though the device were functioning as normal when
 * it is disabled for power saving reasons.
 */
void regcache_cache_only(struct regmap *map, bool enable)
{
	map->lock(map->lock_arg);
	WARN_ON(map->cache_bypass && enable);
	map->cache_only = enable;
	trace_regmap_cache_only(map->dev, enable);
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_only);

/**
 * regcache_mark_dirty: Mark the register cache as dirty
 *
 * @map: map to mark
 *
 * Mark the register cache as dirty, for example due to the device
 * having been powered down for suspend.  If the cache is not marked
 * as dirty then the cache sync will be suppressed.
 */
void regcache_mark_dirty(struct regmap *map)
{
	map->lock(map->lock_arg);
	map->cache_dirty = true;
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_mark_dirty);

/**
 * regcache_cache_bypass: Put a register map into cache bypass mode
 *
 * @map: map to configure
 * @enable: flag if changes should not be written to the cache
 *
 * When a register map is marked with the cache bypass option, writes
 * to the register map API will only update the hardware and not the
 * cache directly.  This is useful when syncing the cache back to the
 * hardware.
 */
void regcache_cache_bypass(struct regmap *map, bool enable)
{
	map->lock(map->lock_arg);
	WARN_ON(map->cache_only && enable);
	map->cache_bypass = enable;
	trace_regmap_cache_bypass(map->dev, enable);
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);
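
/*
 * Illustrative usage sketch only: bypass mode lets a driver poke the
 * hardware without the value being captured by the cache, e.g. a
 * one-shot calibration strobe that must not be replayed by a later
 * regcache_sync().  The register and value below are hypothetical;
 * regmap_write() and regcache_cache_bypass() are the real API.
 */
static int __maybe_unused example_start_calibration(struct regmap *regmap)
{
	int ret;

	regcache_cache_bypass(regmap, true);
	/* Hypothetical "start calibration" strobe register */
	ret = regmap_write(regmap, 0x40, 0x1);
	regcache_cache_bypass(regmap, false);

	return ret;
}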

int regcache_set_reg_present(struct regmap *map, unsigned int reg)
{
	unsigned long *cache_present;
	unsigned int cache_present_size;
	unsigned int nregs;
	int i;

	nregs = reg + 1;
	cache_present_size = BITS_TO_LONGS(nregs);
	cache_present_size *= sizeof(long);

	if (!map->cache_present) {
		cache_present = kmalloc(cache_present_size, GFP_KERNEL);
		if (!cache_present)
			return -ENOMEM;
		bitmap_zero(cache_present, nregs);
		map->cache_present = cache_present;
		map->cache_present_nbits = nregs;
	}

	if (nregs > map->cache_present_nbits) {
		cache_present = krealloc(map->cache_present,
					 cache_present_size, GFP_KERNEL);
		if (!cache_present)
			return -ENOMEM;
		for (i = 0; i < nregs; i++)
			if (i >= map->cache_present_nbits)
				clear_bit(i, cache_present);
		map->cache_present = cache_present;
		map->cache_present_nbits = nregs;
	}

	set_bit(reg, map->cache_present);
	return 0;
}

bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
		      unsigned int val)
{
	if (regcache_get_val(map, base, idx) == val)
		return true;

	/* Use device native format if possible */
	if (map->format.format_val) {
		map->format.format_val(base + (map->cache_word_size * idx),
				       val, 0);
		return false;
	}

	switch (map->cache_word_size) {
	case 1: {
		u8 *cache = base;
		cache[idx] = val;
		break;
	}
	case 2: {
		u16 *cache = base;
		cache[idx] = val;
		break;
	}
	case 4: {
		u32 *cache = base;
		cache[idx] = val;
		break;
	}
	default:
		BUG();
	}
	return false;
}

unsigned int regcache_get_val(struct regmap *map, const void *base,
			      unsigned int idx)
{
	if (!base)
		return -EINVAL;

	/* Use device native format if possible */
	if (map->format.parse_val)
		return map->format.parse_val(regcache_get_val_addr(map, base,
								   idx));

	switch (map->cache_word_size) {
	case 1: {
		const u8 *cache = base;
		return cache[idx];
	}
	case 2: {
		const u16 *cache = base;
		return cache[idx];
	}
	case 4: {
		const u32 *cache = base;
		return cache[idx];
	}
	default:
		BUG();
	}
	/* unreachable */
	return -1;
}

static int regcache_default_cmp(const void *a, const void *b)
{
	const struct reg_default *_a = a;
	const struct reg_default *_b = b;

	return _a->reg - _b->reg;
}

int regcache_lookup_reg(struct regmap *map, unsigned int reg)
{
	struct reg_default key;
	struct reg_default *r;

	key.reg = reg;
	key.def = 0;

	r = bsearch(&key, map->reg_defaults, map->num_reg_defaults,
		    sizeof(struct reg_default), regcache_default_cmp);

	if (r)
		return r - map->reg_defaults;
	else
		return -ENOENT;
}

static int regcache_sync_block_single(struct regmap *map, void *block,
				      unsigned int block_base,
				      unsigned int start, unsigned int end)
{
	unsigned int i, regtmp, val;
	int ret;

	for (i = start; i < end; i++) {
		regtmp = block_base + (i * map->reg_stride);

		if (!regcache_reg_present(map, regtmp))
			continue;

		val = regcache_get_val(map, block, i);

		/* Is this the hardware default?  If so skip. */
		ret = regcache_lookup_reg(map, regtmp);
		if (ret >= 0 && val == map->reg_defaults[ret].def)
			continue;

		map->cache_bypass = 1;

		ret = _regmap_write(map, regtmp, val);

		map->cache_bypass = 0;
		if (ret != 0)
			return ret;
		dev_dbg(map->dev, "Synced register %#x, value %#x\n",
			regtmp, val);
	}

	return 0;
}

static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
					 unsigned int base, unsigned int cur)
{
	size_t val_bytes = map->format.val_bytes;
	int ret, count;

	if (*data == NULL)
		return 0;

	count = cur - base;

	dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n",
		count * val_bytes, count, base, cur - 1);

	map->cache_bypass = 1;

	ret = _regmap_raw_write(map, base, *data, count * val_bytes,
				false);

	map->cache_bypass = 0;

	*data = NULL;

	return ret;
}

static int regcache_sync_block_raw(struct regmap *map, void *block,
			    unsigned int block_base, unsigned int start,
			    unsigned int end)
{
	unsigned int i, val;
	unsigned int regtmp = 0;
	unsigned int base = 0;
	const void *data = NULL;
	int ret;

	for (i = start; i < end; i++) {
		regtmp = block_base + (i * map->reg_stride);

		if (!regcache_reg_present(map, regtmp)) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		val = regcache_get_val(map, block, i);

		/* Is this the hardware default?  If so skip. */
		ret = regcache_lookup_reg(map, regtmp);
		if (ret >= 0 && val == map->reg_defaults[ret].def) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		if (!data) {
			data = regcache_get_val_addr(map, block, i);
			base = regtmp;
		}
	}

	/* Include the last register of the block in the final flush */
	return regcache_sync_block_raw_flush(map, &data, base,
					     regtmp + map->reg_stride);
}

int regcache_sync_block(struct regmap *map, void *block,
			unsigned int block_base, unsigned int start,
			unsigned int end)
{
	if (regmap_can_raw_write(map))
		return regcache_sync_block_raw(map, block, block_base,
					       start, end);
	else
		return regcache_sync_block_single(map, block, block_base,
						  start, end);
}