/*
 * Copyright (C) 2012 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) "persistent_ram: " fmt

#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/pstore_ram.h>
#include <linux/rslib.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <asm/page.h>

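/*
 * On-media layout: a small header (signature plus atomic start/size
 * counters) immediately followed by the log data, which is used as a
 * ring buffer. Because the header lives in the persistent region, the
 * contents can be recovered after a reboot.
 */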
struct persistent_ram_buffer {
	uint32_t    sig;
	atomic_t    start;
	atomic_t    size;
	uint8_t     data[0];
};

#define PERSISTENT_RAM_SIG (0x43474244) /* DBGC */

static inline size_t buffer_size(struct persistent_ram_zone *prz)
{
	return atomic_read(&prz->buffer->size);
}

static inline size_t buffer_start(struct persistent_ram_zone *prz)
{
	return atomic_read(&prz->buffer->start);
}

/* increase and wrap the start pointer, returning the old value */
static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
{
	int old;
	int new;
	unsigned long flags = 0;

	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
		raw_spin_lock_irqsave(&prz->buffer_lock, flags);

	old = atomic_read(&prz->buffer->start);
	new = old + a;
	while (unlikely(new >= prz->buffer_size))
		new -= prz->buffer_size;
	atomic_set(&prz->buffer->start, new);

	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
		raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);

	return old;
}

/* increase the size counter until it hits the max size */
static void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
{
	size_t old;
	size_t new;
	unsigned long flags = 0;

	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
		raw_spin_lock_irqsave(&prz->buffer_lock, flags);

	old = atomic_read(&prz->buffer->size);
	if (old == prz->buffer_size)
		goto exit;

	new = old + a;
	if (new > prz->buffer_size)
		new = prz->buffer_size;
	atomic_set(&prz->buffer->size, new);

exit:
	if (!(prz->flags & PRZ_FLAG_NO_LOCK))
		raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
}

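/*
 * Reed-Solomon helpers: the data area is covered in ecc_info.block_size
 * chunks, with ecc_info.ecc_size parity bytes per chunk stored in
 * prz->par_buffer; the buffer header gets its own parity in
 * prz->par_header.
 */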
static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
	uint8_t *data, size_t len, uint8_t *ecc)
{
	int i;
	uint16_t par[prz->ecc_info.ecc_size];

	/* Initialize the parity buffer */
	memset(par, 0, sizeof(par));
	encode_rs8(prz->rs_decoder, data, len, par, 0);
	for (i = 0; i < prz->ecc_info.ecc_size; i++)
		ecc[i] = par[i];
}

static int persistent_ram_decode_rs8(struct persistent_ram_zone *prz,
	void *data, size_t len, uint8_t *ecc)
{
	int i;
	uint16_t par[prz->ecc_info.ecc_size];

	for (i = 0; i < prz->ecc_info.ecc_size; i++)
		par[i] = ecc[i];
	return decode_rs8(prz->rs_decoder, data, par, len,
				NULL, 0, NULL, 0, NULL);
}

static void notrace persistent_ram_update_ecc(struct persistent_ram_zone *prz,
	unsigned int start, unsigned int count)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	uint8_t *buffer_end = buffer->data + prz->buffer_size;
	uint8_t *block;
	uint8_t *par;
	int ecc_block_size = prz->ecc_info.block_size;
	int ecc_size = prz->ecc_info.ecc_size;
	int size = ecc_block_size;

	if (!ecc_size)
		return;

	block = buffer->data + (start & ~(ecc_block_size - 1));
	par = prz->par_buffer + (start / ecc_block_size) * ecc_size;

	do {
		if (block + ecc_block_size > buffer_end)
			size = buffer_end - block;
		persistent_ram_encode_rs8(prz, block, size, par);
		block += ecc_block_size;
		par += ecc_size;
	} while (block < buffer->data + start + count);
}

static void persistent_ram_update_header_ecc(struct persistent_ram_zone *prz)
{
	struct persistent_ram_buffer *buffer = prz->buffer;

	if (!prz->ecc_info.ecc_size)
		return;

	persistent_ram_encode_rs8(prz, (uint8_t *)buffer, sizeof(*buffer),
				  prz->par_header);
}

static void persistent_ram_ecc_old(struct persistent_ram_zone *prz)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	uint8_t *block;
	uint8_t *par;

	if (!prz->ecc_info.ecc_size)
		return;

	block = buffer->data;
	par = prz->par_buffer;
	while (block < buffer->data + buffer_size(prz)) {
		int numerr;
		int size = prz->ecc_info.block_size;
		if (block + size > buffer->data + prz->buffer_size)
			size = buffer->data + prz->buffer_size - block;
		numerr = persistent_ram_decode_rs8(prz, block, size, par);
		if (numerr > 0) {
			pr_devel("error in block %p, %d\n", block, numerr);
			prz->corrected_bytes += numerr;
		} else if (numerr < 0) {
			pr_devel("uncorrectable error in block %p\n", block);
			prz->bad_blocks++;
		}
		block += prz->ecc_info.block_size;
		par += prz->ecc_info.ecc_size;
	}
}

static int persistent_ram_init_ecc(struct persistent_ram_zone *prz,
				   struct persistent_ram_ecc_info *ecc_info)
{
	int numerr;
	struct persistent_ram_buffer *buffer = prz->buffer;
	int ecc_blocks;
	size_t ecc_total;

	if (!ecc_info || !ecc_info->ecc_size)
		return 0;

	prz->ecc_info.block_size = ecc_info->block_size ?: 128;
	prz->ecc_info.ecc_size = ecc_info->ecc_size ?: 16;
	prz->ecc_info.symsize = ecc_info->symsize ?: 8;
	prz->ecc_info.poly = ecc_info->poly ?: 0x11d;

	ecc_blocks = DIV_ROUND_UP(prz->buffer_size - prz->ecc_info.ecc_size,
				  prz->ecc_info.block_size +
				  prz->ecc_info.ecc_size);
	ecc_total = (ecc_blocks + 1) * prz->ecc_info.ecc_size;
	if (ecc_total >= prz->buffer_size) {
		pr_err("%s: invalid ecc_size %u (total %zu, buffer size %zu)\n",
		       __func__, prz->ecc_info.ecc_size,
		       ecc_total, prz->buffer_size);
		return -EINVAL;
	}

	prz->buffer_size -= ecc_total;
	prz->par_buffer = buffer->data + prz->buffer_size;
	prz->par_header = prz->par_buffer +
			  ecc_blocks * prz->ecc_info.ecc_size;

	/*
	 * first consecutive root is 0
	 * primitive element to generate roots = 1
	 */
	prz->rs_decoder = init_rs(prz->ecc_info.symsize, prz->ecc_info.poly,
				  0, 1, prz->ecc_info.ecc_size);
	if (prz->rs_decoder == NULL) {
		pr_info("init_rs failed\n");
		return -EINVAL;
	}

	prz->corrected_bytes = 0;
	prz->bad_blocks = 0;

	numerr = persistent_ram_decode_rs8(prz, buffer, sizeof(*buffer),
					   prz->par_header);
	if (numerr > 0) {
		pr_info("error in header, %d\n", numerr);
		prz->corrected_bytes += numerr;
	} else if (numerr < 0) {
		pr_info("uncorrectable error in header\n");
		prz->bad_blocks++;
	}

	return 0;
}

ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
	char *str, size_t len)
{
	ssize_t ret;

	if (!prz->ecc_info.ecc_size)
		return 0;

	if (prz->corrected_bytes || prz->bad_blocks)
		ret = snprintf(str, len, ""
			"\n%d Corrected bytes, %d unrecoverable blocks\n",
			prz->corrected_bytes, prz->bad_blocks);
	else
		ret = snprintf(str, len, "\nNo errors detected\n");

	return ret;
}

static void notrace persistent_ram_update(struct persistent_ram_zone *prz,
	const void *s, unsigned int start, unsigned int count)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	memcpy_toio(buffer->data + start, s, count);
	persistent_ram_update_ecc(prz, start, count);
}

static int notrace persistent_ram_update_user(struct persistent_ram_zone *prz,
	const void __user *s, unsigned int start, unsigned int count)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	int ret = unlikely(__copy_from_user(buffer->data + start, s, count)) ?
		-EFAULT : 0;
	persistent_ram_update_ecc(prz, start, count);
	return ret;
}

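/*
 * Copy the wrapped ring buffer out into a linear old_log allocation,
 * oldest bytes first, after running ECC correction over the old data.
 */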
void persistent_ram_save_old(struct persistent_ram_zone *prz)
{
	struct persistent_ram_buffer *buffer = prz->buffer;
	size_t size = buffer_size(prz);
	size_t start = buffer_start(prz);

	if (!size)
		return;

	if (!prz->old_log) {
		persistent_ram_ecc_old(prz);
		prz->old_log = kmalloc(size, GFP_KERNEL);
	}
	if (!prz->old_log) {
		pr_err("failed to allocate buffer\n");
		return;
	}

	prz->old_log_size = size;
	memcpy_fromio(prz->old_log, &buffer->data[start], size - start);
	memcpy_fromio(prz->old_log + size - start, &buffer->data[0], start);
}

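/*
 * Append to the ring buffer. Writes larger than the buffer keep only
 * the trailing prz->buffer_size bytes, and the full requested count is
 * still reported back to the caller.
 */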
int notrace persistent_ram_write(struct persistent_ram_zone *prz,
	const void *s, unsigned int count)
{
	int rem;
	int c = count;
	size_t start;

	if (unlikely(c > prz->buffer_size)) {
		s += c - prz->buffer_size;
		c = prz->buffer_size;
	}

	buffer_size_add(prz, c);

	start = buffer_start_add(prz, c);

	rem = prz->buffer_size - start;
	if (unlikely(rem < c)) {
		persistent_ram_update(prz, s, start, rem);
		s += rem;
		c -= rem;
		start = 0;
	}
	persistent_ram_update(prz, s, start, c);

	persistent_ram_update_header_ecc(prz);

	return count;
}

int notrace persistent_ram_write_user(struct persistent_ram_zone *prz,
	const void __user *s, unsigned int count)
{
	int rem, ret = 0, c = count;
	size_t start;

	if (unlikely(!access_ok(VERIFY_READ, s, count)))
		return -EFAULT;
	if (unlikely(c > prz->buffer_size)) {
		s += c - prz->buffer_size;
		c = prz->buffer_size;
	}

	buffer_size_add(prz, c);

	start = buffer_start_add(prz, c);

	rem = prz->buffer_size - start;
	if (unlikely(rem < c)) {
		ret = persistent_ram_update_user(prz, s, start, rem);
		s += rem;
		c -= rem;
		start = 0;
	}
	if (likely(!ret))
		ret = persistent_ram_update_user(prz, s, start, c);

	persistent_ram_update_header_ecc(prz);

	return unlikely(ret) ? ret : count;
}

size_t persistent_ram_old_size(struct persistent_ram_zone *prz)
{
	return prz->old_log_size;
}

void *persistent_ram_old(struct persistent_ram_zone *prz)
{
	return prz->old_log;
}

void persistent_ram_free_old(struct persistent_ram_zone *prz)
{
	kfree(prz->old_log);
	prz->old_log = NULL;
	prz->old_log_size = 0;
}

void persistent_ram_zap(struct persistent_ram_zone *prz)
{
	atomic_set(&prz->buffer->start, 0);
	atomic_set(&prz->buffer->size, 0);
	persistent_ram_update_header_ecc(prz);
}

static void *persistent_ram_vmap(phys_addr_t start, size_t size,
		unsigned int memtype)
{
	struct page **pages;
	phys_addr_t page_start;
	unsigned int page_count;
	pgprot_t prot;
	unsigned int i;
	void *vaddr;

	page_start = start - offset_in_page(start);
	page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE);

	if (memtype)
		prot = pgprot_noncached(PAGE_KERNEL);
	else
		prot = pgprot_writecombine(PAGE_KERNEL);

	pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
	if (!pages) {
		pr_err("%s: Failed to allocate array for %u pages\n",
		       __func__, page_count);
		return NULL;
	}

	for (i = 0; i < page_count; i++) {
		phys_addr_t addr = page_start + i * PAGE_SIZE;
		pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
	}
	vaddr = vmap(pages, page_count, VM_MAP, prot);
	kfree(pages);

	return vaddr;
}

static void *persistent_ram_iomap(phys_addr_t start, size_t size,
		unsigned int memtype)
{
	void *va;

	if (!request_mem_region(start, size, "persistent_ram")) {
		pr_err("request mem region (0x%llx@0x%llx) failed\n",
			(unsigned long long)size, (unsigned long long)start);
		return NULL;
	}

	if (memtype)
		va = ioremap(start, size);
	else
		va = ioremap_wc(start, size);

	return va;
}

static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
		struct persistent_ram_zone *prz, int memtype)
{
	prz->paddr = start;
	prz->size = size;

	if (pfn_valid(start >> PAGE_SHIFT))
		prz->vaddr = persistent_ram_vmap(start, size, memtype);
	else
		prz->vaddr = persistent_ram_iomap(start, size, memtype);

	if (!prz->vaddr) {
		pr_err("%s: Failed to map 0x%llx pages at 0x%llx\n", __func__,
			(unsigned long long)size, (unsigned long long)start);
		return -ENOMEM;
	}

	prz->buffer = prz->vaddr + offset_in_page(start);
	prz->buffer_size = size - sizeof(struct persistent_ram_buffer);

	return 0;
}

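/*
 * Validate the zone after mapping: if the stored signature matches the
 * caller's sig XORed with PERSISTENT_RAM_SIG and the start/size
 * counters are sane, preserve the previous boot's contents; otherwise
 * reinitialize the zone.
 */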
static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
				    struct persistent_ram_ecc_info *ecc_info)
{
	int ret;

	ret = persistent_ram_init_ecc(prz, ecc_info);
	if (ret)
		return ret;

	sig ^= PERSISTENT_RAM_SIG;

	if (prz->buffer->sig == sig) {
		if (buffer_size(prz) > prz->buffer_size ||
		    buffer_start(prz) > buffer_size(prz))
			pr_info("found existing invalid buffer, size %zu, start %zu\n",
				buffer_size(prz), buffer_start(prz));
		else {
			pr_debug("found existing buffer, size %zu, start %zu\n",
				 buffer_size(prz), buffer_start(prz));
			persistent_ram_save_old(prz);
			return 0;
		}
	} else {
		pr_debug("no valid data in buffer (sig = 0x%08x)\n",
			 prz->buffer->sig);
	}

	/* Rewind missing or invalid memory area. */
	prz->buffer->sig = sig;
	persistent_ram_zap(prz);

	return 0;
}

void persistent_ram_free(struct persistent_ram_zone *prz)
{
	if (!prz)
		return;

	if (prz->vaddr) {
		if (pfn_valid(prz->paddr >> PAGE_SHIFT)) {
			vunmap(prz->vaddr);
		} else {
			iounmap(prz->vaddr);
			release_mem_region(prz->paddr, prz->size);
		}
		prz->vaddr = NULL;
	}
	persistent_ram_free_old(prz);
	kfree(prz);
}

struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
			u32 sig, struct persistent_ram_ecc_info *ecc_info,
			unsigned int memtype, u32 flags)
{
	struct persistent_ram_zone *prz;
	int ret = -ENOMEM;

	prz = kzalloc(sizeof(struct persistent_ram_zone), GFP_KERNEL);
	if (!prz) {
		pr_err("failed to allocate persistent ram zone\n");
		goto err;
	}

	/* Initialize general buffer state. */
	raw_spin_lock_init(&prz->buffer_lock);
	prz->flags = flags;

	ret = persistent_ram_buffer_map(start, size, prz, memtype);
	if (ret)
		goto err;

	ret = persistent_ram_post_init(prz, sig, ecc_info);
	if (ret)
		goto err;

	return prz;
err:
	persistent_ram_free(prz);
	return ERR_PTR(ret);
}
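
/*
 * Illustrative use of the API above (a sketch, not part of this file);
 * the physical address, size, and signature below are made-up values:
 *
 *	struct persistent_ram_ecc_info ecc = { .ecc_size = 16 };
 *	struct persistent_ram_zone *prz;
 *
 *	prz = persistent_ram_new(0x80000000, SZ_1M, 0x12345678,
 *				 &ecc, 0, 0);
 *	if (IS_ERR(prz))
 *		return PTR_ERR(prz);
 *	persistent_ram_write(prz, "hello\n", 6);
 *	pr_info("old log is %zu bytes\n", persistent_ram_old_size(prz));
 *	persistent_ram_free(prz);
 */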