1 /*
2 * Register cache access API - LZO caching support
3 *
4 * Copyright 2011 Wolfson Microelectronics plc
5 *
6 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13 #include <linux/slab.h>
14 #include <linux/device.h>
15 #include <linux/lzo.h>
16
17 #include "internal.h"
18
19 static int regcache_lzo_exit(struct regmap *map);
20
/*
 * Per-block state for the LZO register cache.
 *
 * Each block holds one compressed slice of the raw register defaults;
 * src/dst double as scratch pointers during compress/decompress round
 * trips (see regcache_lzo_read()/regcache_lzo_write()).
 */
struct regcache_lzo_ctx {
	void *wmem;			/* LZO1X compression workspace */
	void *dst;			/* output buffer (normally the compressed data) */
	const void *src;		/* input buffer for the next (de)compress */
	size_t src_len;			/* valid bytes at src */
	size_t dst_len;			/* capacity in, actual size out */
	size_t decompressed_size;	/* uncompressed size of this block */
	unsigned long *sync_bmp;	/* dirty-register bitmap, shared by all blocks */
	int sync_bmp_nbits;		/* number of bits in sync_bmp */
};
31
32 #define LZO_BLOCK_NUM 8
/* Number of compressed blocks the register cache is split into. */
static int regcache_lzo_block_count(struct regmap *map)
{
	return LZO_BLOCK_NUM;
}
37
regcache_lzo_prepare(struct regcache_lzo_ctx * lzo_ctx)38 static int regcache_lzo_prepare(struct regcache_lzo_ctx *lzo_ctx)
39 {
40 lzo_ctx->wmem = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
41 if (!lzo_ctx->wmem)
42 return -ENOMEM;
43 return 0;
44 }
45
regcache_lzo_compress(struct regcache_lzo_ctx * lzo_ctx)46 static int regcache_lzo_compress(struct regcache_lzo_ctx *lzo_ctx)
47 {
48 size_t compress_size;
49 int ret;
50
51 ret = lzo1x_1_compress(lzo_ctx->src, lzo_ctx->src_len,
52 lzo_ctx->dst, &compress_size, lzo_ctx->wmem);
53 if (ret != LZO_E_OK || compress_size > lzo_ctx->dst_len)
54 return -EINVAL;
55 lzo_ctx->dst_len = compress_size;
56 return 0;
57 }
58
regcache_lzo_decompress(struct regcache_lzo_ctx * lzo_ctx)59 static int regcache_lzo_decompress(struct regcache_lzo_ctx *lzo_ctx)
60 {
61 size_t dst_len;
62 int ret;
63
64 dst_len = lzo_ctx->dst_len;
65 ret = lzo1x_decompress_safe(lzo_ctx->src, lzo_ctx->src_len,
66 lzo_ctx->dst, &dst_len);
67 if (ret != LZO_E_OK || dst_len != lzo_ctx->dst_len)
68 return -EINVAL;
69 return 0;
70 }
71
regcache_lzo_compress_cache_block(struct regmap * map,struct regcache_lzo_ctx * lzo_ctx)72 static int regcache_lzo_compress_cache_block(struct regmap *map,
73 struct regcache_lzo_ctx *lzo_ctx)
74 {
75 int ret;
76
77 lzo_ctx->dst_len = lzo1x_worst_compress(PAGE_SIZE);
78 lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
79 if (!lzo_ctx->dst) {
80 lzo_ctx->dst_len = 0;
81 return -ENOMEM;
82 }
83
84 ret = regcache_lzo_compress(lzo_ctx);
85 if (ret < 0)
86 return ret;
87 return 0;
88 }
89
regcache_lzo_decompress_cache_block(struct regmap * map,struct regcache_lzo_ctx * lzo_ctx)90 static int regcache_lzo_decompress_cache_block(struct regmap *map,
91 struct regcache_lzo_ctx *lzo_ctx)
92 {
93 int ret;
94
95 lzo_ctx->dst_len = lzo_ctx->decompressed_size;
96 lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
97 if (!lzo_ctx->dst) {
98 lzo_ctx->dst_len = 0;
99 return -ENOMEM;
100 }
101
102 ret = regcache_lzo_decompress(lzo_ctx);
103 if (ret < 0)
104 return ret;
105 return 0;
106 }
107
regcache_lzo_get_blkindex(struct regmap * map,unsigned int reg)108 static inline int regcache_lzo_get_blkindex(struct regmap *map,
109 unsigned int reg)
110 {
111 return ((reg / map->reg_stride) * map->cache_word_size) /
112 DIV_ROUND_UP(map->cache_size_raw,
113 regcache_lzo_block_count(map));
114 }
115
regcache_lzo_get_blkpos(struct regmap * map,unsigned int reg)116 static inline int regcache_lzo_get_blkpos(struct regmap *map,
117 unsigned int reg)
118 {
119 return (reg / map->reg_stride) %
120 (DIV_ROUND_UP(map->cache_size_raw,
121 regcache_lzo_block_count(map)) /
122 map->cache_word_size);
123 }
124
/* Uncompressed size in bytes of one cache block (last one may be shorter). */
static inline int regcache_lzo_get_blksize(struct regmap *map)
{
	return DIV_ROUND_UP(map->cache_size_raw,
			    regcache_lzo_block_count(map));
}
130
regcache_lzo_init(struct regmap * map)131 static int regcache_lzo_init(struct regmap *map)
132 {
133 struct regcache_lzo_ctx **lzo_blocks;
134 size_t bmp_size;
135 int ret, i, blksize, blkcount;
136 const char *p, *end;
137 unsigned long *sync_bmp;
138
139 ret = 0;
140
141 blkcount = regcache_lzo_block_count(map);
142 map->cache = kzalloc(blkcount * sizeof *lzo_blocks,
143 GFP_KERNEL);
144 if (!map->cache)
145 return -ENOMEM;
146 lzo_blocks = map->cache;
147
148 /*
149 * allocate a bitmap to be used when syncing the cache with
150 * the hardware. Each time a register is modified, the corresponding
151 * bit is set in the bitmap, so we know that we have to sync
152 * that register.
153 */
154 bmp_size = map->num_reg_defaults_raw;
155 sync_bmp = kmalloc(BITS_TO_LONGS(bmp_size) * sizeof(long),
156 GFP_KERNEL);
157 if (!sync_bmp) {
158 ret = -ENOMEM;
159 goto err;
160 }
161 bitmap_zero(sync_bmp, bmp_size);
162
163 /* allocate the lzo blocks and initialize them */
164 for (i = 0; i < blkcount; i++) {
165 lzo_blocks[i] = kzalloc(sizeof **lzo_blocks,
166 GFP_KERNEL);
167 if (!lzo_blocks[i]) {
168 kfree(sync_bmp);
169 ret = -ENOMEM;
170 goto err;
171 }
172 lzo_blocks[i]->sync_bmp = sync_bmp;
173 lzo_blocks[i]->sync_bmp_nbits = bmp_size;
174 /* alloc the working space for the compressed block */
175 ret = regcache_lzo_prepare(lzo_blocks[i]);
176 if (ret < 0)
177 goto err;
178 }
179
180 blksize = regcache_lzo_get_blksize(map);
181 p = map->reg_defaults_raw;
182 end = map->reg_defaults_raw + map->cache_size_raw;
183 /* compress the register map and fill the lzo blocks */
184 for (i = 0; i < blkcount; i++, p += blksize) {
185 lzo_blocks[i]->src = p;
186 if (p + blksize > end)
187 lzo_blocks[i]->src_len = end - p;
188 else
189 lzo_blocks[i]->src_len = blksize;
190 ret = regcache_lzo_compress_cache_block(map,
191 lzo_blocks[i]);
192 if (ret < 0)
193 goto err;
194 lzo_blocks[i]->decompressed_size =
195 lzo_blocks[i]->src_len;
196 }
197
198 return 0;
199 err:
200 regcache_lzo_exit(map);
201 return ret;
202 }
203
regcache_lzo_exit(struct regmap * map)204 static int regcache_lzo_exit(struct regmap *map)
205 {
206 struct regcache_lzo_ctx **lzo_blocks;
207 int i, blkcount;
208
209 lzo_blocks = map->cache;
210 if (!lzo_blocks)
211 return 0;
212
213 blkcount = regcache_lzo_block_count(map);
214 /*
215 * the pointer to the bitmap used for syncing the cache
216 * is shared amongst all lzo_blocks. Ensure it is freed
217 * only once.
218 */
219 if (lzo_blocks[0])
220 kfree(lzo_blocks[0]->sync_bmp);
221 for (i = 0; i < blkcount; i++) {
222 if (lzo_blocks[i]) {
223 kfree(lzo_blocks[i]->wmem);
224 kfree(lzo_blocks[i]->dst);
225 }
226 /* each lzo_block is a pointer returned by kmalloc or NULL */
227 kfree(lzo_blocks[i]);
228 }
229 kfree(lzo_blocks);
230 map->cache = NULL;
231 return 0;
232 }
233
/*
 * Read a register value from the compressed cache.
 *
 * The block containing @reg is decompressed into a temporary buffer,
 * the value is extracted at the register's word position, and the
 * temporary buffer is freed; the compressed data (dst/dst_len) is
 * restored untouched. Returns 0 on success or a negative errno.
 */
static int regcache_lzo_read(struct regmap *map,
			     unsigned int reg, unsigned int *value)
{
	struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
	int ret, blkindex, blkpos;
	size_t blksize, tmp_dst_len;
	void *tmp_dst;

	/* index of the compressed lzo block */
	blkindex = regcache_lzo_get_blkindex(map, reg);
	/* register index within the decompressed block */
	blkpos = regcache_lzo_get_blkpos(map, reg);
	/* size of the compressed block */
	blksize = regcache_lzo_get_blksize(map);
	lzo_blocks = map->cache;
	lzo_block = lzo_blocks[blkindex];

	/* save the pointer and length of the compressed block */
	tmp_dst = lzo_block->dst;
	tmp_dst_len = lzo_block->dst_len;

	/* prepare the source to be the compressed block */
	lzo_block->src = lzo_block->dst;
	lzo_block->src_len = lzo_block->dst_len;

	/* decompress the block; on success dst is a fresh buffer */
	ret = regcache_lzo_decompress_cache_block(map, lzo_block);
	if (ret >= 0)
		/* fetch the value from the cache */
		*value = regcache_get_val(map, lzo_block->dst, blkpos);

	/* free the scratch buffer (NULL on allocation failure is fine) */
	kfree(lzo_block->dst);
	/* restore the pointer and length of the compressed block */
	lzo_block->dst = tmp_dst;
	lzo_block->dst_len = tmp_dst_len;

	return ret;
}
272
/*
 * Write a register value into the compressed cache.
 *
 * The block containing @reg is decompressed, the value is patched in,
 * and the block is recompressed into a newly allocated buffer which
 * replaces the old compressed data. On any failure the original
 * compressed buffer (tmp_dst/tmp_dst_len) is restored so the cache
 * stays consistent. Returns 0 on success or a negative errno.
 */
static int regcache_lzo_write(struct regmap *map,
			      unsigned int reg, unsigned int value)
{
	struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
	int ret, blkindex, blkpos;
	size_t blksize, tmp_dst_len;
	void *tmp_dst;

	/* index of the compressed lzo block */
	blkindex = regcache_lzo_get_blkindex(map, reg);
	/* register index within the decompressed block */
	blkpos = regcache_lzo_get_blkpos(map, reg);
	/* size of the compressed block */
	blksize = regcache_lzo_get_blksize(map);
	lzo_blocks = map->cache;
	lzo_block = lzo_blocks[blkindex];

	/* save the pointer and length of the compressed block */
	tmp_dst = lzo_block->dst;
	tmp_dst_len = lzo_block->dst_len;

	/* prepare the source to be the compressed block */
	lzo_block->src = lzo_block->dst;
	lzo_block->src_len = lzo_block->dst_len;

	/* decompress the block into a fresh dst buffer */
	ret = regcache_lzo_decompress_cache_block(map, lzo_block);
	if (ret < 0) {
		kfree(lzo_block->dst);
		goto out;
	}

	/*
	 * write the new value to the cache; a true return means the
	 * cached value was already @value, so drop the scratch buffer
	 * and keep the old compressed data (ret is 0 here)
	 */
	if (regcache_set_val(map, lzo_block->dst, blkpos, value)) {
		kfree(lzo_block->dst);
		goto out;
	}

	/* prepare the source to be the decompressed block */
	lzo_block->src = lzo_block->dst;
	lzo_block->src_len = lzo_block->dst_len;

	/* compress the block; on success dst is a new compressed buffer */
	ret = regcache_lzo_compress_cache_block(map, lzo_block);
	if (ret < 0) {
		/* free both the new compressed buffer and the decompressed one */
		kfree(lzo_block->dst);
		kfree(lzo_block->src);
		goto out;
	}

	/* set the bit so we know we have to sync this register */
	set_bit(reg / map->reg_stride, lzo_block->sync_bmp);
	/* the new compressed data replaces the old: free old + scratch */
	kfree(tmp_dst);
	kfree(lzo_block->src);
	return 0;
out:
	/* failure: put the original compressed block back in place */
	lzo_block->dst = tmp_dst;
	lzo_block->dst_len = tmp_dst_len;
	return ret;
}
333
/*
 * Write back every dirty register in [min, max] to the hardware.
 *
 * Walks the shared sync bitmap starting at @min; registers whose cached
 * value equals the hardware default are skipped. Cache bypass is
 * enabled around each write so the write-back does not re-dirty the
 * cache. Returns 0 on success or the first error from read/write.
 */
static int regcache_lzo_sync(struct regmap *map, unsigned int min,
			     unsigned int max)
{
	struct regcache_lzo_ctx **lzo_blocks;
	unsigned int val;
	int i;
	int ret;

	lzo_blocks = map->cache;
	i = min;
	/* i is both the starting bit and the loop cursor */
	for_each_set_bit_from(i, lzo_blocks[0]->sync_bmp,
			      lzo_blocks[0]->sync_bmp_nbits) {
		if (i > max)
			continue;

		ret = regcache_read(map, i, &val);
		if (ret)
			return ret;

		/* Is this the hardware default? If so skip. */
		ret = regcache_lookup_reg(map, i);
		if (ret > 0 && val == map->reg_defaults[ret].def)
			continue;

		map->cache_bypass = 1;
		ret = _regmap_write(map, i, val);
		map->cache_bypass = 0;
		if (ret)
			return ret;
		dev_dbg(map->dev, "Synced register %#x, value %#x\n",
			i, val);
	}

	return 0;
}
369
/* regcache backend operations for the LZO-compressed cache type */
struct regcache_ops regcache_lzo_ops = {
	.type = REGCACHE_COMPRESSED,
	.name = "lzo",
	.init = regcache_lzo_init,
	.exit = regcache_lzo_exit,
	.read = regcache_lzo_read,
	.write = regcache_lzo_write,
	.sync = regcache_lzo_sync
};
379