1 // SPDX-License-Identifier: GPL-2.0
2
3 #define dev_fmt(fmt) "mtdoops-pstore: " fmt
4
5 #include <linux/kernel.h>
6 #include <linux/module.h>
7 #include <linux/pstore_blk.h>
8 #include <linux/mtd/mtd.h>
9 #include <linux/bitops.h>
10 #include <linux/slab.h>
11
/*
 * Driver-wide state for the single pstore/blk backend instance.
 * mtdpstore binds to exactly one MTD partition at a time.
 */
static struct mtdpstore_context {
	int index;	/* MTD device index to bind to; -1 means "none" */
	struct pstore_blk_config info;	/* geometry from pstore/blk (kmsg_size, device, ...) */
	struct pstore_device_info dev;	/* registration handle passed to pstore/blk */
	struct mtd_info *mtd;	/* attached MTD device, NULL until notify_add succeeds */
	unsigned long *rmmap; /* removed bit map, one bit per kmsg zone */
	unsigned long *usedmap; /* used bit map, one bit per kmsg zone */
	/*
	 * used for panic write
	 * As there are no block_isbad for panic case, we should keep this
	 * status before panic to ensure panic_write not failed.
	 */
	unsigned long *badmap; /* bad block bit map, one bit per eraseblock */
} oops_cxt;
26
/*
 * Check whether the eraseblock containing @off is bad.
 * Returns true/false, or a negative errno if the MTD query itself failed.
 * Bad blocks are cached in cxt->badmap so the panic path never has to
 * call into the MTD driver.
 */
static int mtdpstore_block_isbad(struct mtdpstore_context *cxt, loff_t off)
{
	struct mtd_info *mtd = cxt->mtd;
	loff_t blkoff = ALIGN_DOWN(off, mtd->erasesize);
	u64 blknum = div_u64(blkoff, mtd->erasesize);
	int err;

	/* cached result from an earlier query */
	if (test_bit(blknum, cxt->badmap))
		return true;

	err = mtd_block_isbad(mtd, blkoff);
	if (err < 0) {
		dev_err(&mtd->dev, "mtd_block_isbad failed, aborting\n");
		return err;
	}
	if (err > 0) {
		/* remember for later (and for panic-time) checks */
		set_bit(blknum, cxt->badmap);
		return true;
	}
	return false;
}
48
/*
 * Panic-safe bad-block check: consult only the cached bitmap, since
 * calling into the MTD driver is not an option while panicking.
 */
static inline int mtdpstore_panic_block_isbad(struct mtdpstore_context *cxt,
		loff_t off)
{
	u64 blknum = div_u64(ALIGN_DOWN(off, cxt->mtd->erasesize),
			     cxt->mtd->erasesize);

	return test_bit(blknum, cxt->badmap);
}
59
/* Flag the kmsg zone containing @off as holding valid data. */
static inline void mtdpstore_mark_used(struct mtdpstore_context *cxt,
		loff_t off)
{
	u64 zone = div_u64(off, cxt->info.kmsg_size);

	dev_dbg(&cxt->mtd->dev, "mark zone %llu used\n", zone);
	set_bit(zone, cxt->usedmap);
}
69
/* Flag the kmsg zone containing @off as free. */
static inline void mtdpstore_mark_unused(struct mtdpstore_context *cxt,
		loff_t off)
{
	u64 zone = div_u64(off, cxt->info.kmsg_size);

	dev_dbg(&cxt->mtd->dev, "mark zone %llu unused\n", zone);
	clear_bit(zone, cxt->usedmap);
}
79
/* Clear the "used" bit of every kmsg zone in the eraseblock holding @off. */
static inline void mtdpstore_block_mark_unused(struct mtdpstore_context *cxt,
		loff_t off)
{
	struct mtd_info *mtd = cxt->mtd;
	u32 zones_per_blk = mtd->erasesize / cxt->info.kmsg_size;
	u64 zone = div_u64(ALIGN_DOWN(off, mtd->erasesize),
			   cxt->info.kmsg_size);
	u32 i;

	for (i = 0; i < zones_per_blk; i++, zone++) {
		dev_dbg(&mtd->dev, "mark zone %llu unused\n", zone);
		clear_bit(zone, cxt->usedmap);
	}
}
96
/*
 * A zone counts as used if it carries valid data, or if it sits inside a
 * known-bad eraseblock (so callers never write into bad blocks).
 */
static inline int mtdpstore_is_used(struct mtdpstore_context *cxt, loff_t off)
{
	u64 zonenum = div_u64(off, cxt->info.kmsg_size);
	u64 blknum = div_u64(off, cxt->mtd->erasesize);

	return test_bit(blknum, cxt->badmap) ||
	       test_bit(zonenum, cxt->usedmap);
}
106
/* Does any kmsg zone inside the eraseblock holding @off still carry data? */
static int mtdpstore_block_is_used(struct mtdpstore_context *cxt,
		loff_t off)
{
	struct mtd_info *mtd = cxt->mtd;
	u32 zones_per_blk = mtd->erasesize / cxt->info.kmsg_size;
	u64 zone = div_u64(ALIGN_DOWN(off, mtd->erasesize),
			   cxt->info.kmsg_size);
	u32 i;

	for (i = 0; i < zones_per_blk; i++, zone++) {
		if (test_bit(zone, cxt->usedmap))
			return true;
	}
	return false;
}
124
/*
 * Heuristic "is this zone erased?" check: an erased NOR/NAND region reads
 * back as all 0xFF.  Only a prefix (a quarter of the write unit, capped at
 * @size) is scanned, which is enough to tell an erased zone from a written
 * one without walking the whole buffer.
 */
static int mtdpstore_is_empty(struct mtdpstore_context *cxt, char *buf,
		size_t size)
{
	struct mtd_info *mtd = cxt->mtd;
	size_t sz;
	size_t i;	/* size_t, to match sz and avoid sign-compare */

	sz = min_t(uint32_t, size, mtd->writesize / 4);
	for (i = 0; i < sz; i++) {
		if (buf[i] != (char)0xFF)
			return false;
	}
	return true;
}
139
/* Remember that the zone at @off was removed, for lazy erase at unregister. */
static void mtdpstore_mark_removed(struct mtdpstore_context *cxt, loff_t off)
{
	u64 zone = div_u64(off, cxt->info.kmsg_size);

	dev_dbg(&cxt->mtd->dev, "mark zone %llu removed\n", zone);
	set_bit(zone, cxt->rmmap);
}
148
/* Drop the "removed" mark of every zone in the eraseblock holding @off. */
static void mtdpstore_block_clear_removed(struct mtdpstore_context *cxt,
		loff_t off)
{
	struct mtd_info *mtd = cxt->mtd;
	u32 zones_per_blk = mtd->erasesize / cxt->info.kmsg_size;
	u64 zone = div_u64(ALIGN_DOWN(off, mtd->erasesize),
			   cxt->info.kmsg_size);
	u32 i;

	for (i = 0; i < zones_per_blk; i++, zone++)
		clear_bit(zone, cxt->rmmap);
}
164
/* Is any zone in the eraseblock holding @off pending a lazy erase? */
static int mtdpstore_block_is_removed(struct mtdpstore_context *cxt,
		loff_t off)
{
	struct mtd_info *mtd = cxt->mtd;
	u32 zones_per_blk = mtd->erasesize / cxt->info.kmsg_size;
	u64 zone = div_u64(ALIGN_DOWN(off, mtd->erasesize),
			   cxt->info.kmsg_size);
	u32 i;

	for (i = 0; i < zones_per_blk; i++, zone++) {
		if (test_bit(zone, cxt->rmmap))
			return true;
	}
	return false;
}
182
/*
 * Erase the whole eraseblock containing @off.  On success the block's
 * pending-removal marks are cleared, since the data is now gone.
 */
static int mtdpstore_erase_do(struct mtdpstore_context *cxt, loff_t off)
{
	struct mtd_info *mtd = cxt->mtd;
	struct erase_info erase = {};
	int ret;

	off = ALIGN_DOWN(off, mtd->erasesize);
	dev_dbg(&mtd->dev, "try to erase off 0x%llx\n", off);
	erase.addr = off;
	erase.len = mtd->erasesize;
	ret = mtd_erase(mtd, &erase);
	if (ret) {
		dev_err(&mtd->dev, "erase of region [0x%llx, 0x%llx] on \"%s\" failed\n",
			(unsigned long long)erase.addr,
			(unsigned long long)erase.len, cxt->info.device);
		return ret;
	}
	mtdpstore_block_clear_removed(cxt, off);
	return 0;
}
202
203 /*
204 * called while removing file
205 *
206 * Avoiding over erasing, do erase block only when the whole block is unused.
207 * If the block contains valid log, do erase lazily on flush_removed() when
208 * unregister.
209 */
mtdpstore_erase(size_t size,loff_t off)210 static ssize_t mtdpstore_erase(size_t size, loff_t off)
211 {
212 struct mtdpstore_context *cxt = &oops_cxt;
213
214 if (mtdpstore_block_isbad(cxt, off))
215 return -EIO;
216
217 mtdpstore_mark_unused(cxt, off);
218
219 /* If the block still has valid data, mtdpstore do erase lazily */
220 if (likely(mtdpstore_block_is_used(cxt, off))) {
221 mtdpstore_mark_removed(cxt, off);
222 return 0;
223 }
224
225 /* all zones are unused, erase it */
226 return mtdpstore_erase_do(cxt, off);
227 }
228
229 /*
230 * What is security for mtdpstore?
231 * As there is no erase for panic case, we should ensure at least one zone
232 * is writable. Otherwise, panic write will fail.
233 * If zone is used, write operation will return -ENOMSG, which means that
234 * pstore/blk will try one by one until gets an empty zone. So, it is not
235 * needed to ensure the next zone is empty, but at least one.
236 */
/*
 * Guarantee that at least one zone stays writable so a future panic_write
 * (which cannot erase) has somewhere to land.  Scans all zones starting at
 * the one containing @off; if none is free, erases the first good block
 * after @off (wrapping around the device) to free its zones.
 */
static int mtdpstore_security(struct mtdpstore_context *cxt, loff_t off)
{
	int ret = 0, i;
	struct mtd_info *mtd = cxt->mtd;
	u32 zonenum = (u32)div_u64(off, cxt->info.kmsg_size);
	u32 zonecnt = (u32)div_u64(cxt->mtd->size, cxt->info.kmsg_size);
	u32 blkcnt = (u32)div_u64(cxt->mtd->size, cxt->mtd->erasesize);
	u32 erasesize = cxt->mtd->erasesize;

	for (i = 0; i < zonecnt; i++) {
		u32 num = (zonenum + i) % zonecnt;

		/* found empty zone */
		if (!test_bit(num, cxt->usedmap))
			return 0;
	}

	/* If there is no any empty zone, we have no way but to do erase */
	while (blkcnt--) {
		/* advance to the next block, wrapping at device size */
		div64_u64_rem(off + erasesize, cxt->mtd->size, (u64 *)&off);

		if (mtdpstore_block_isbad(cxt, off))
			continue;

		ret = mtdpstore_erase_do(cxt, off);
		if (!ret) {
			/* the freshly-erased block's zones are free again */
			mtdpstore_block_mark_unused(cxt, off);
			break;
		}
	}

	if (ret)
		dev_err(&mtd->dev, "all blocks bad!\n");
	dev_dbg(&mtd->dev, "end security\n");
	return ret;
}
273
mtdpstore_write(const char * buf,size_t size,loff_t off)274 static ssize_t mtdpstore_write(const char *buf, size_t size, loff_t off)
275 {
276 struct mtdpstore_context *cxt = &oops_cxt;
277 struct mtd_info *mtd = cxt->mtd;
278 size_t retlen;
279 int ret;
280
281 if (mtdpstore_block_isbad(cxt, off))
282 return -ENOMSG;
283
284 /* zone is used, please try next one */
285 if (mtdpstore_is_used(cxt, off))
286 return -ENOMSG;
287
288 dev_dbg(&mtd->dev, "try to write off 0x%llx size %zu\n", off, size);
289 ret = mtd_write(cxt->mtd, off, size, &retlen, (u_char *)buf);
290 if (ret < 0 || retlen != size) {
291 dev_err(&mtd->dev, "write failure at %lld (%zu of %zu written), err %d\n",
292 off, retlen, size, ret);
293 return -EIO;
294 }
295 mtdpstore_mark_used(cxt, off);
296
297 mtdpstore_security(cxt, off);
298 return retlen;
299 }
300
mtdpstore_is_io_error(int ret)301 static inline bool mtdpstore_is_io_error(int ret)
302 {
303 return ret < 0 && !mtd_is_bitflip(ret) && !mtd_is_eccerr(ret);
304 }
305
306 /*
307 * All zones will be read as pstore/blk will read zone one by one when do
308 * recover.
309 */
/*
 * pstore/blk read hook.  Reads the zone at @off in chunks (the driver may
 * return short reads), refreshes the used/unused bitmap from the content,
 * and keeps one zone free via mtdpstore_security().
 * NOTE(review): on success this returns the length of the *last* chunk
 * read, not the cumulative total — callers appear to tolerate this, but
 * confirm against the pstore/blk zone-read contract.
 */
static ssize_t mtdpstore_read(char *buf, size_t size, loff_t off)
{
	struct mtdpstore_context *cxt = &oops_cxt;
	struct mtd_info *mtd = cxt->mtd;
	size_t retlen, done;
	int ret;

	if (mtdpstore_block_isbad(cxt, off))
		return -ENOMSG;

	dev_dbg(&mtd->dev, "try to read off 0x%llx size %zu\n", off, size);
	for (done = 0, retlen = 0; done < size; done += retlen) {
		/* reset so a driver that fails early can't leave stale retlen */
		retlen = 0;

		ret = mtd_read(cxt->mtd, off + done, size - done, &retlen,
				(u_char *)buf + done);
		if (mtdpstore_is_io_error(ret)) {
			dev_err(&mtd->dev, "read failure at %lld (%zu of %zu read), err %d\n",
					off + done, retlen, size - done, ret);
			/* the zone may be broken, try next one */
			return -ENOMSG;
		}

		/*
		 * ECC error. The impact on log data is so small. Maybe we can
		 * still read it and try to understand. So mtdpstore just hands
		 * over what it gets and user can judge whether the data is
		 * valid or not.
		 */
		if (mtd_is_eccerr(ret)) {
			dev_err(&mtd->dev, "ecc error at %lld (%zu of %zu read), err %d\n",
					off + done, retlen, size - done, ret);
			/* driver may not set retlen when ecc error */
			retlen = retlen == 0 ? size - done : retlen;
		}
	}

	/* all-0xFF content means the zone is erased/unused */
	if (mtdpstore_is_empty(cxt, buf, size))
		mtdpstore_mark_unused(cxt, off);
	else
		mtdpstore_mark_used(cxt, off);

	mtdpstore_security(cxt, off);
	return retlen;
}
355
mtdpstore_panic_write(const char * buf,size_t size,loff_t off)356 static ssize_t mtdpstore_panic_write(const char *buf, size_t size, loff_t off)
357 {
358 struct mtdpstore_context *cxt = &oops_cxt;
359 struct mtd_info *mtd = cxt->mtd;
360 size_t retlen;
361 int ret;
362
363 if (mtdpstore_panic_block_isbad(cxt, off))
364 return -ENOMSG;
365
366 /* zone is used, please try next one */
367 if (mtdpstore_is_used(cxt, off))
368 return -ENOMSG;
369
370 ret = mtd_panic_write(cxt->mtd, off, size, &retlen, (u_char *)buf);
371 if (ret < 0 || size != retlen) {
372 dev_err(&mtd->dev, "panic write failure at %lld (%zu of %zu read), err %d\n",
373 off, retlen, size, ret);
374 return -EIO;
375 }
376 mtdpstore_mark_used(cxt, off);
377
378 return retlen;
379 }
380
mtdpstore_notify_add(struct mtd_info * mtd)381 static void mtdpstore_notify_add(struct mtd_info *mtd)
382 {
383 int ret;
384 struct mtdpstore_context *cxt = &oops_cxt;
385 struct pstore_blk_config *info = &cxt->info;
386 unsigned long longcnt;
387
388 if (!strcmp(mtd->name, info->device))
389 cxt->index = mtd->index;
390
391 if (mtd->index != cxt->index || cxt->index < 0)
392 return;
393
394 dev_dbg(&mtd->dev, "found matching MTD device %s\n", mtd->name);
395
396 if (mtd->size < info->kmsg_size * 2) {
397 dev_err(&mtd->dev, "MTD partition %d not big enough\n",
398 mtd->index);
399 return;
400 }
401 /*
402 * kmsg_size must be aligned to 4096 Bytes, which is limited by
403 * psblk. The default value of kmsg_size is 64KB. If kmsg_size
404 * is larger than erasesize, some errors will occur since mtdpsotre
405 * is designed on it.
406 */
407 if (mtd->erasesize < info->kmsg_size) {
408 dev_err(&mtd->dev, "eraseblock size of MTD partition %d too small\n",
409 mtd->index);
410 return;
411 }
412 if (unlikely(info->kmsg_size % mtd->writesize)) {
413 dev_err(&mtd->dev, "record size %lu KB must align to write size %d KB\n",
414 info->kmsg_size / 1024,
415 mtd->writesize / 1024);
416 return;
417 }
418
419 longcnt = BITS_TO_LONGS(div_u64(mtd->size, info->kmsg_size));
420 cxt->rmmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);
421 cxt->usedmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);
422
423 longcnt = BITS_TO_LONGS(div_u64(mtd->size, mtd->erasesize));
424 cxt->badmap = kcalloc(longcnt, sizeof(long), GFP_KERNEL);
425
426 /* just support dmesg right now */
427 cxt->dev.flags = PSTORE_FLAGS_DMESG;
428 cxt->dev.zone.read = mtdpstore_read;
429 cxt->dev.zone.write = mtdpstore_write;
430 cxt->dev.zone.erase = mtdpstore_erase;
431 cxt->dev.zone.panic_write = mtdpstore_panic_write;
432 cxt->dev.zone.total_size = mtd->size;
433
434 ret = register_pstore_device(&cxt->dev);
435 if (ret) {
436 dev_err(&mtd->dev, "mtd%d register to psblk failed\n",
437 mtd->index);
438 return;
439 }
440 cxt->mtd = mtd;
441 dev_info(&mtd->dev, "Attached to MTD device %d\n", mtd->index);
442 }
443
/*
 * Erase one block while preserving its valid zones: read the whole block
 * to a cache, erase it, then write back only the zones still marked used.
 * @off must be the start of the block; @size is the span to walk back.
 */
static int mtdpstore_flush_removed_do(struct mtdpstore_context *cxt,
		loff_t off, size_t size)
{
	struct mtd_info *mtd = cxt->mtd;
	u_char *buf;
	int ret;
	size_t retlen;
	size_t done = 0;	/* byte offset of the current zone inside buf */
	struct erase_info erase;

	buf = kmalloc(mtd->erasesize, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* 1st. read to cache */
	ret = mtd_read(mtd, off, mtd->erasesize, &retlen, buf);
	if (mtdpstore_is_io_error(ret))
		goto free;

	/* 2nd. erase block */
	erase.len = mtd->erasesize;
	erase.addr = off;
	ret = mtd_erase(mtd, &erase);
	if (ret)
		goto free;

	/* 3rd. write back */
	while (size) {
		unsigned int zonesize = cxt->info.kmsg_size;

		/* there is valid data on block, write back */
		if (mtdpstore_is_used(cxt, off)) {
			/*
			 * Write each zone from its own position in the cached
			 * block — writing from buf's start would duplicate the
			 * first zone's data into every used zone.
			 */
			ret = mtd_write(mtd, off, zonesize, &retlen,
					buf + done);
			if (ret)
				dev_err(&mtd->dev, "write failure at %lld (%zu of %u written), err %d\n",
						off, retlen, zonesize, ret);
		}

		off += zonesize;
		done += zonesize;
		size -= min_t(unsigned int, zonesize, size);
	}

free:
	kfree(buf);
	return ret;
}
489
490 /*
491 * What does mtdpstore_flush_removed() do?
492 * When user remove any log file on pstore filesystem, mtdpstore should do
493 * something to ensure log file removed. If the whole block is no longer used,
494 * it's nice to erase the block. However if the block still contains valid log,
495 * what mtdpstore can do is to erase and write the valid log back.
496 */
mtdpstore_flush_removed(struct mtdpstore_context * cxt)497 static int mtdpstore_flush_removed(struct mtdpstore_context *cxt)
498 {
499 struct mtd_info *mtd = cxt->mtd;
500 int ret;
501 loff_t off;
502 u32 blkcnt = (u32)div_u64(mtd->size, mtd->erasesize);
503
504 for (off = 0; blkcnt > 0; blkcnt--, off += mtd->erasesize) {
505 ret = mtdpstore_block_isbad(cxt, off);
506 if (ret)
507 continue;
508
509 ret = mtdpstore_block_is_removed(cxt, off);
510 if (!ret)
511 continue;
512
513 ret = mtdpstore_flush_removed_do(cxt, off, mtd->erasesize);
514 if (ret)
515 return ret;
516 }
517 return 0;
518 }
519
mtdpstore_notify_remove(struct mtd_info * mtd)520 static void mtdpstore_notify_remove(struct mtd_info *mtd)
521 {
522 struct mtdpstore_context *cxt = &oops_cxt;
523
524 if (mtd->index != cxt->index || cxt->index < 0)
525 return;
526
527 mtdpstore_flush_removed(cxt);
528
529 unregister_pstore_device(&cxt->dev);
530 kfree(cxt->badmap);
531 kfree(cxt->usedmap);
532 kfree(cxt->rmmap);
533 cxt->mtd = NULL;
534 cxt->index = -1;
535 }
536
/* Hook MTD device add/remove so binding/unbinding happens automatically. */
static struct mtd_notifier mtdpstore_notifier = {
	.add = mtdpstore_notify_add,
	.remove = mtdpstore_notify_remove,
};
541
mtdpstore_init(void)542 static int __init mtdpstore_init(void)
543 {
544 int ret;
545 struct mtdpstore_context *cxt = &oops_cxt;
546 struct pstore_blk_config *info = &cxt->info;
547
548 ret = pstore_blk_get_config(info);
549 if (unlikely(ret))
550 return ret;
551
552 if (strlen(info->device) == 0) {
553 pr_err("mtd device must be supplied (device name is empty)\n");
554 return -EINVAL;
555 }
556 if (!info->kmsg_size) {
557 pr_err("no backend enabled (kmsg_size is 0)\n");
558 return -EINVAL;
559 }
560
561 /* Setup the MTD device to use */
562 ret = kstrtoint((char *)info->device, 0, &cxt->index);
563 if (ret)
564 cxt->index = -1;
565
566 register_mtd_user(&mtdpstore_notifier);
567 return 0;
568 }
569 module_init(mtdpstore_init);
570
/* Module exit: unregister the MTD notifier. */
static void __exit mtdpstore_exit(void)
{
	unregister_mtd_user(&mtdpstore_notifier);
}
module_exit(mtdpstore_exit);
576
/* Module metadata */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("WeiXiong Liao <liaoweixiong@allwinnertech.com>");
MODULE_DESCRIPTION("MTD backend for pstore/blk");
580