1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Provide a pstore intermediate backend, organized into kernel memory
4  * allocated zones that are then mapped and flushed into a single
5  * contiguous region on a storage backend of some kind (block, mtd, etc).
6  */
7 
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9 
10 #include <linux/kernel.h>
11 #include <linux/module.h>
12 #include <linux/slab.h>
13 #include <linux/mount.h>
14 #include <linux/printk.h>
15 #include <linux/fs.h>
16 #include <linux/pstore_zone.h>
17 #include <linux/kdev_t.h>
18 #include <linux/device.h>
19 #include <linux/namei.h>
20 #include <linux/fcntl.h>
21 #include <linux/uio.h>
22 #include <linux/writeback.h>
23 #include "internal.h"
24 
25 /**
26  * struct psz_buffer - header of zone to flush to storage
27  *
28  * @sig: signature to indicate header (PSZ_SIG xor PSZONE-type value)
29  * @datalen: length of data in @data
30  * @start: offset into @data where the stored bytes begin
31  * @data: zone data.
32  */
33 struct psz_buffer {
34 #define PSZ_SIG (0x43474244) /* DBGC */
35 	uint32_t sig;
36 	atomic_t datalen;
37 	atomic_t start;
38 	uint8_t data[];
39 };
40 
41 /**
42  * struct psz_kmsg_header - kmsg dump-specific header to flush to storage
43  *
44  * @magic: magic num for kmsg dump header
45  * @time: kmsg dump trigger time
46  * @compressed: whether compressed
47  * @counter: kmsg dump counter
48  * @reason: the kmsg dump reason (e.g. oops, panic, etc)
49  * @data: pointer to log data
50  *
51  * This is a sub-header for a kmsg dump, trailing after &psz_buffer.
52  */
53 struct psz_kmsg_header {
54 #define PSTORE_KMSG_HEADER_MAGIC 0x4dfc3ae5 /* Just a random number */
55 	uint32_t magic;
56 	struct timespec64 time;
57 	bool compressed;
58 	uint32_t counter;
59 	enum kmsg_dump_reason reason;
60 	uint8_t data[];
61 };
62 
63 /**
64  * struct pstore_zone - single stored buffer
65  *
66  * @off: zone offset of storage
67  * @type: front-end type for this zone
68  * @name: front-end name for this zone
69  * @buffer: pointer to data buffer managed by this zone
70  * @oldbuf: pointer to old data buffer
71  * @buffer_size: bytes in @buffer->data
72  * @should_recover: whether this zone should recover from storage
73  * @dirty: whether the data in @buffer is dirty
74  *
75  * zone structure in memory.
76  */
77 struct pstore_zone {
78 	loff_t off;
79 	const char *name;
80 	enum pstore_type_id type;
81 
82 	struct psz_buffer *buffer;
83 	struct psz_buffer *oldbuf;
84 	size_t buffer_size;
85 	bool should_recover;
86 	atomic_t dirty;
87 };
88 
89 /**
90  * struct psz_context - all about running state of pstore/zone
91  *
92  * @kpszs: kmsg dump storage zones
93  * @ppsz: pmsg storage zone
94  * @cpsz: console storage zone
95  * @fpszs: ftrace storage zones
96  * @bpsz: blackbox storage zone
97  * @kmsg_max_cnt: max count of @kpszs
98  * @kmsg_read_cnt: counter of kmsg dump zones read
99  * @kmsg_write_cnt: counter of kmsg dump zones written
100  * @pmsg_read_cnt: counter of pmsg zones read
101  * @console_read_cnt: counter of console zones read
102  * @ftrace_max_cnt: max count of @fpszs
103  * @ftrace_read_cnt: counter of ftrace zones read
104  * @blackbox_read_cnt: counter of blackbox zones read
105  * @oops_counter: counter of oops dumps
106  * @panic_counter: counter of panic dumps
107  * @recovered: whether finished recovering data from storage
108  * @on_panic: whether panic is happening
109  * @pstore_zone_info_lock: lock to @pstore_zone_info
110  * @pstore_zone_info: information from backend
111  * @pstore: structure for pstore
112  */
113 struct psz_context {
114 	struct pstore_zone **kpszs;
115 	struct pstore_zone *ppsz;
116 	struct pstore_zone *cpsz;
117 	struct pstore_zone **fpszs;
118 	struct pstore_zone *bpsz;
119 	unsigned int kmsg_max_cnt;
120 	unsigned int kmsg_read_cnt;
121 	unsigned int kmsg_write_cnt;
122 	unsigned int pmsg_read_cnt;
123 	unsigned int console_read_cnt;
124 	unsigned int ftrace_max_cnt;
125 	unsigned int ftrace_read_cnt;
126 	unsigned int blackbox_read_cnt;
127 	/*
128 	 * These counters should be calculated during recovery.
129 	 * They record the number of oops/panic dumps across crashes, not just since the last boot.
130 	 */
131 	unsigned int oops_counter;
132 	unsigned int panic_counter;
133 	atomic_t recovered;
134 	atomic_t on_panic;
135 
136 	/*
137 	 * pstore_zone_info_lock protects this entire structure during calls
138 	 * to register_pstore_zone()/unregister_pstore_zone().
139 	 */
140 	struct mutex pstore_zone_info_lock;
141 	struct pstore_zone_info *pstore_zone_info;
142 	struct pstore_info pstore;
143 };
144 static struct psz_context pstore_zone_cxt;
145 
146 static void psz_flush_all_dirty_zones(struct work_struct *);
147 static DECLARE_DELAYED_WORK(psz_cleaner, psz_flush_all_dirty_zones);
148 
149 /**
150  * enum psz_flush_mode - flush mode for psz_zone_write()
151  *
152  * @FLUSH_NONE: do not flush to storage, only update the data in memory
153  * @FLUSH_PART: just flush part of data including meta data to storage
154  * @FLUSH_META: just flush meta data of zone to storage
155  * @FLUSH_ALL: flush all of zone
156  */
157 enum psz_flush_mode {
158 	FLUSH_NONE = 0,
159 	FLUSH_PART,
160 	FLUSH_META,
161 	FLUSH_ALL,
162 };
163 
164 static inline int buffer_datalen(struct pstore_zone *zone)
165 {
166 	return atomic_read(&zone->buffer->datalen);
167 }
168 
169 static inline int buffer_start(struct pstore_zone *zone)
170 {
171 	return atomic_read(&zone->buffer->start);
172 }
173 
174 static inline bool is_on_panic(void)
175 {
176 	return atomic_read(&pstore_zone_cxt.on_panic);
177 }
178 
179 static ssize_t psz_zone_read_buffer(struct pstore_zone *zone, char *buf,
180 		size_t len, unsigned long off)
181 {
182 	if (!buf || !zone || !zone->buffer)
183 		return -EINVAL;
184 	if (off > zone->buffer_size)
185 		return -EINVAL;
186 	len = min_t(size_t, len, zone->buffer_size - off);
187 	memcpy(buf, zone->buffer->data + off, len);
188 	return len;
189 }
190 
191 static int psz_zone_read_oldbuf(struct pstore_zone *zone, char *buf,
192 		size_t len, unsigned long off)
193 {
194 	if (!buf || !zone || !zone->oldbuf)
195 		return -EINVAL;
196 	if (off > zone->buffer_size)
197 		return -EINVAL;
198 	len = min_t(size_t, len, zone->buffer_size - off);
199 	memcpy(buf, zone->oldbuf->data + off, len);
200 	return 0;
201 }
202 
203 static int psz_zone_write(struct pstore_zone *zone,
204 		enum psz_flush_mode flush_mode, const char *buf,
205 		size_t len, unsigned long off)
206 {
207 	struct pstore_zone_info *info = pstore_zone_cxt.pstore_zone_info;
208 	ssize_t wcnt = 0;
209 	ssize_t (*writeop)(const char *buf, size_t bytes, loff_t pos);
210 	size_t wlen;
211 
212 	if (off > zone->buffer_size)
213 		return -EINVAL;
214 
215 	wlen = min_t(size_t, len, zone->buffer_size - off);
216 	if (buf && wlen) {
217 		memcpy(zone->buffer->data + off, buf, wlen);
218 		atomic_set(&zone->buffer->datalen, wlen + off);
219 	}
220 
221 	/* avoid damaging old records */
222 	if (!is_on_panic() && !atomic_read(&pstore_zone_cxt.recovered))
223 		goto dirty;
224 
225 	writeop = is_on_panic() ? info->panic_write : info->write;
226 	if (!writeop)
227 		goto dirty;
228 
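	/*
	 * On-storage layout of a zone: the struct psz_buffer header lives at
	 * zone->off, immediately followed by the zone data, so data byte @off
	 * sits at zone->off + sizeof(*zone->buffer) + off.
	 */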
229 	switch (flush_mode) {
230 	case FLUSH_NONE:
231 		if (unlikely(buf && wlen))
232 			goto dirty;
233 		return 0;
234 	case FLUSH_PART:
235 		wcnt = writeop((const char *)zone->buffer->data + off, wlen,
236 				zone->off + sizeof(*zone->buffer) + off);
237 		if (wcnt != wlen)
238 			goto dirty;
239 		fallthrough;
240 	case FLUSH_META:
241 		wlen = sizeof(struct psz_buffer);
242 		wcnt = writeop((const char *)zone->buffer, wlen, zone->off);
243 		if (wcnt != wlen)
244 			goto dirty;
245 		break;
246 	case FLUSH_ALL:
247 		wlen = zone->buffer_size + sizeof(*zone->buffer);
248 		wcnt = writeop((const char *)zone->buffer, wlen, zone->off);
249 		if (wcnt != wlen)
250 			goto dirty;
251 		break;
252 	}
253 
254 	return 0;
255 dirty:
256 	/* no need to mark dirty if going to try next zone */
257 	if (wcnt == -ENOMSG)
258 		return -ENOMSG;
259 	atomic_set(&zone->dirty, true);
260 	/* flush dirty zones nicely */
261 	if (wcnt == -EBUSY && !is_on_panic())
262 		schedule_delayed_work(&psz_cleaner, msecs_to_jiffies(500));
263 	return -EBUSY;
264 }
265 
266 static int psz_flush_dirty_zone(struct pstore_zone *zone)
267 {
268 	int ret;
269 
270 	if (unlikely(!zone))
271 		return -EINVAL;
272 
273 	if (unlikely(!atomic_read(&pstore_zone_cxt.recovered)))
274 		return -EBUSY;
275 
276 	if (!atomic_xchg(&zone->dirty, false))
277 		return 0;
278 
279 	ret = psz_zone_write(zone, FLUSH_ALL, NULL, 0, 0);
280 	if (ret)
281 		atomic_set(&zone->dirty, true);
282 	return ret;
283 }
284 
285 static int psz_flush_dirty_zones(struct pstore_zone **zones, unsigned int cnt)
286 {
287 	int i, ret;
288 	struct pstore_zone *zone;
289 
290 	if (!zones)
291 		return -EINVAL;
292 
293 	for (i = 0; i < cnt; i++) {
294 		zone = zones[i];
295 		if (!zone)
296 			return -EINVAL;
297 		ret = psz_flush_dirty_zone(zone);
298 		if (ret)
299 			return ret;
300 	}
301 	return 0;
302 }
303 
304 static int psz_move_zone(struct pstore_zone *old, struct pstore_zone *new)
305 {
306 	const char *data = (const char *)old->buffer->data;
307 	int ret;
308 
309 	ret = psz_zone_write(new, FLUSH_ALL, data, buffer_datalen(old), 0);
310 	if (ret) {
311 		atomic_set(&new->buffer->datalen, 0);
312 		atomic_set(&new->dirty, false);
313 		return ret;
314 	}
315 	atomic_set(&old->buffer->datalen, 0);
316 	return 0;
317 }
318 
319 static void psz_flush_all_dirty_zones(struct work_struct *work)
320 {
321 	struct psz_context *cxt = &pstore_zone_cxt;
322 	int ret = 0;
323 
324 	if (cxt->ppsz)
325 		ret |= psz_flush_dirty_zone(cxt->ppsz);
326 	if (cxt->cpsz)
327 		ret |= psz_flush_dirty_zone(cxt->cpsz);
328 	if (cxt->kpszs)
329 		ret |= psz_flush_dirty_zones(cxt->kpszs, cxt->kmsg_max_cnt);
330 	if (cxt->fpszs)
331 		ret |= psz_flush_dirty_zones(cxt->fpszs, cxt->ftrace_max_cnt);
332 	if (cxt->bpsz)
333 		ret |= psz_flush_dirty_zone(cxt->bpsz);
334 	if (ret && cxt->pstore_zone_info)
335 		schedule_delayed_work(&psz_cleaner, msecs_to_jiffies(1000));
336 }
337 
338 static int psz_kmsg_recover_data(struct psz_context *cxt)
339 {
340 	struct pstore_zone_info *info = cxt->pstore_zone_info;
341 	struct pstore_zone *zone = NULL;
342 	struct psz_buffer *buf;
343 	unsigned long i;
344 	ssize_t rcnt;
345 
346 	if (!info->read)
347 		return -EINVAL;
348 
349 	for (i = 0; i < cxt->kmsg_max_cnt; i++) {
350 		zone = cxt->kpszs[i];
351 		if (unlikely(!zone))
352 			return -EINVAL;
353 		if (atomic_read(&zone->dirty)) {
354 			unsigned int wcnt = cxt->kmsg_write_cnt;
355 			struct pstore_zone *new = cxt->kpszs[wcnt];
356 			int ret;
357 
358 			ret = psz_move_zone(zone, new);
359 			if (ret) {
360 				pr_err("move zone from %lu to %d failed\n",
361 						i, wcnt);
362 				return ret;
363 			}
364 			cxt->kmsg_write_cnt = (wcnt + 1) % cxt->kmsg_max_cnt;
365 		}
366 		if (!zone->should_recover)
367 			continue;
368 		buf = zone->buffer;
369 		rcnt = info->read((char *)buf, zone->buffer_size + sizeof(*buf),
370 				zone->off);
371 		if (rcnt != zone->buffer_size + sizeof(*buf))
372 			return (int)rcnt < 0 ? (int)rcnt : -EIO;
373 	}
374 	return 0;
375 }
376 
377 static int psz_kmsg_recover_meta(struct psz_context *cxt)
378 {
379 	struct pstore_zone_info *info = cxt->pstore_zone_info;
380 	struct pstore_zone *zone;
381 	size_t rcnt, len;
382 	struct psz_buffer *buf;
383 	struct psz_kmsg_header *hdr;
384 	struct timespec64 time = { };
385 	unsigned long i;
386 	/*
387 	 * Recovery may run during a panic, when we cannot allocate memory
388 	 * with kmalloc, so use a local array instead.
389 	 */
390 	char buffer_header[sizeof(*buf) + sizeof(*hdr)] = {0};
391 
392 	if (!info->read)
393 		return -EINVAL;
394 
395 	len = sizeof(*buf) + sizeof(*hdr);
396 	buf = (struct psz_buffer *)buffer_header;
397 	for (i = 0; i < cxt->kmsg_max_cnt; i++) {
398 		zone = cxt->kpszs[i];
399 		if (unlikely(!zone))
400 			return -EINVAL;
401 
402 		rcnt = info->read((char *)buf, len, zone->off);
403 		if (rcnt == -ENOMSG) {
404 			pr_debug("%s with id %lu may be broken, skip\n",
405 					zone->name, i);
406 			continue;
407 		} else if (rcnt != len) {
408 			pr_err("read %s with id %lu failed\n", zone->name, i);
409 			return (int)rcnt < 0 ? (int)rcnt : -EIO;
410 		}
411 
412 		if (buf->sig != zone->buffer->sig) {
413 			pr_debug("no valid data in kmsg dump zone %lu\n", i);
414 			continue;
415 		}
416 
417 		if (zone->buffer_size < atomic_read(&buf->datalen)) {
418 			pr_info("found overtop zone: %s: id %lu, off %lld, size %zu\n",
419 					zone->name, i, zone->off,
420 					zone->buffer_size);
421 			continue;
422 		}
423 
424 		hdr = (struct psz_kmsg_header *)buf->data;
425 		if (hdr->magic != PSTORE_KMSG_HEADER_MAGIC) {
426 			pr_info("found invalid zone: %s: id %lu, off %lld, size %zu\n",
427 					zone->name, i, zone->off,
428 					zone->buffer_size);
429 			continue;
430 		}
431 
432 		/*
433 		 * This is the newest zone so far; the next one must be the oldest
434 		 * or an unused zone, since zones are written in a circular order.
435 		 */
436 		if (hdr->time.tv_sec >= time.tv_sec) {
437 			time.tv_sec = hdr->time.tv_sec;
438 			cxt->kmsg_write_cnt = (i + 1) % cxt->kmsg_max_cnt;
439 		}
440 
441 		if (hdr->reason == KMSG_DUMP_OOPS)
442 			cxt->oops_counter =
443 				max(cxt->oops_counter, hdr->counter);
444 		else if (hdr->reason == KMSG_DUMP_PANIC)
445 			cxt->panic_counter =
446 				max(cxt->panic_counter, hdr->counter);
447 
448 		if (!atomic_read(&buf->datalen)) {
449 			pr_debug("found erased zone: %s: id %lu, off %lld, size %zu, datalen %d\n",
450 					zone->name, i, zone->off,
451 					zone->buffer_size,
452 					atomic_read(&buf->datalen));
453 			continue;
454 		}
455 
456 		if (!is_on_panic())
457 			zone->should_recover = true;
458 		pr_debug("found nice zone: %s: id %lu, off %lld, size %zu, datalen %d\n",
459 				zone->name, i, zone->off,
460 				zone->buffer_size, atomic_read(&buf->datalen));
461 	}
462 
463 	return 0;
464 }
465 
466 static int psz_kmsg_recover(struct psz_context *cxt)
467 {
468 	int ret;
469 
470 	if (!cxt->kpszs)
471 		return 0;
472 
473 	ret = psz_kmsg_recover_meta(cxt);
474 	if (ret)
475 		goto recover_fail;
476 
477 	ret = psz_kmsg_recover_data(cxt);
478 	if (ret)
479 		goto recover_fail;
480 
481 	return 0;
482 recover_fail:
483 	pr_debug("psz_recover_kmsg failed\n");
484 	return ret;
485 }
486 
487 static int psz_recover_zone(struct psz_context *cxt, struct pstore_zone *zone)
488 {
489 	struct pstore_zone_info *info = cxt->pstore_zone_info;
490 	struct psz_buffer *oldbuf, tmpbuf;
491 	int ret = 0;
492 	char *buf;
493 	ssize_t rcnt, len, start, off;
494 
495 	if (!zone || zone->oldbuf)
496 		return 0;
497 
498 	if (is_on_panic()) {
499 		/* save as much data as possible */
500 		psz_flush_dirty_zone(zone);
501 		return 0;
502 	}
503 
504 	if (unlikely(!info->read))
505 		return -EINVAL;
506 
507 	len = sizeof(struct psz_buffer);
508 	rcnt = info->read((char *)&tmpbuf, len, zone->off);
509 	if (rcnt != len) {
510 		pr_debug("read zone %s failed\n", zone->name);
511 		return (int)rcnt < 0 ? (int)rcnt : -EIO;
512 	}
513 
514 	if (tmpbuf.sig != zone->buffer->sig) {
515 		pr_debug("no valid data in zone %s\n", zone->name);
516 		return 0;
517 	}
518 
519 	if (zone->buffer_size < atomic_read(&tmpbuf.datalen) ||
520 		zone->buffer_size < atomic_read(&tmpbuf.start)) {
521 		pr_info("found overtop zone: %s: off %lld, size %zu\n",
522 				zone->name, zone->off, zone->buffer_size);
523 		/* just keep going */
524 		return 0;
525 	}
526 
527 	if (!atomic_read(&tmpbuf.datalen)) {
528 		pr_debug("found erased zone: %s: off %lld, size %zu, datalen %d\n",
529 				zone->name, zone->off, zone->buffer_size,
530 				atomic_read(&tmpbuf.datalen));
531 		return 0;
532 	}
533 
534 	pr_debug("found nice zone: %s: off %lld, size %zu, datalen %d\n",
535 			zone->name, zone->off, zone->buffer_size,
536 			atomic_read(&tmpbuf.datalen));
537 
538 	len = atomic_read(&tmpbuf.datalen) + sizeof(*oldbuf);
539 	oldbuf = kzalloc(len, GFP_KERNEL);
540 	if (!oldbuf)
541 		return -ENOMEM;
542 
543 	memcpy(oldbuf, &tmpbuf, sizeof(*oldbuf));
544 	buf = (char *)oldbuf + sizeof(*oldbuf);
545 	len = atomic_read(&oldbuf->datalen);
546 	start = atomic_read(&oldbuf->start);
547 	off = zone->off + sizeof(*oldbuf);
548 
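	/*
	 * The record may wrap around inside the zone (see psz_record_write()),
	 * so it is read back in two chunks: first from @start to the end of
	 * the stored data, then the wrapped part from the start of the zone.
	 */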
549 	/* get part of data */
550 	rcnt = info->read(buf, len - start, off + start);
551 	if (rcnt != len - start) {
552 		pr_err("read zone %s failed\n", zone->name);
553 		ret = (int)rcnt < 0 ? (int)rcnt : -EIO;
554 		goto free_oldbuf;
555 	}
556 
557 	/* get the rest of data */
558 	rcnt = info->read(buf + len - start, start, off);
559 	if (rcnt != start) {
560 		pr_err("read zone %s failed\n", zone->name);
561 		ret = (int)rcnt < 0 ? (int)rcnt : -EIO;
562 		goto free_oldbuf;
563 	}
564 
565 	zone->oldbuf = oldbuf;
566 	psz_flush_dirty_zone(zone);
567 	return 0;
568 
569 free_oldbuf:
570 	kfree(oldbuf);
571 	return ret;
572 }
573 
574 static int psz_recover_zones(struct psz_context *cxt,
575 		struct pstore_zone **zones, unsigned int cnt)
576 {
577 	int ret;
578 	unsigned int i;
579 	struct pstore_zone *zone;
580 
581 	if (!zones)
582 		return 0;
583 
584 	for (i = 0; i < cnt; i++) {
585 		zone = zones[i];
586 		if (unlikely(!zone))
587 			continue;
588 		ret = psz_recover_zone(cxt, zone);
589 		if (ret)
590 			goto recover_fail;
591 	}
592 
593 	return 0;
594 recover_fail:
595 	pr_debug("recover %s[%u] failed\n", zone->name, i);
596 	return ret;
597 }
598 
599 /**
600  * psz_recovery() - recover data from storage
601  * @cxt: the context of pstore/zone
602  *
603  * recovery means reading data back from storage after rebooting
604  *
605  * Return: 0 on success, others on failure.
606  */
607 static inline int psz_recovery(struct psz_context *cxt)
608 {
609 	int ret;
610 
611 	if (atomic_read(&cxt->recovered))
612 		return 0;
613 
614 	ret = psz_kmsg_recover(cxt);
615 	if (ret)
616 		goto out;
617 
618 	ret = psz_recover_zone(cxt, cxt->ppsz);
619 	if (ret)
620 		goto out;
621 
622 	ret = psz_recover_zone(cxt, cxt->cpsz);
623 	if (ret)
624 		goto out;
625 
626 	ret = psz_recover_zone(cxt, cxt->bpsz);
627 	if (ret)
628 		goto out;
629 
630 	ret = psz_recover_zones(cxt, cxt->fpszs, cxt->ftrace_max_cnt);
631 
632 out:
633 	if (unlikely(ret))
634 		pr_err("recover failed\n");
635 	else {
636 		pr_debug("recover end!\n");
637 		atomic_set(&cxt->recovered, 1);
638 	}
639 	return ret;
640 }
641 
642 static int psz_pstore_open(struct pstore_info *psi)
643 {
644 	struct psz_context *cxt = psi->data;
645 
646 	cxt->kmsg_read_cnt = 0;
647 	cxt->pmsg_read_cnt = 0;
648 	cxt->console_read_cnt = 0;
649 	cxt->ftrace_read_cnt = 0;
650 	cxt->blackbox_read_cnt = 0;
651 	return 0;
652 }
653 
654 static inline bool psz_old_ok(struct pstore_zone *zone)
655 {
656 	if (zone && zone->oldbuf && atomic_read(&zone->oldbuf->datalen))
657 		return true;
658 	return false;
659 }
660 
661 static inline bool psz_ok(struct pstore_zone *zone)
662 {
663 	if (zone && zone->buffer && buffer_datalen(zone))
664 		return true;
665 	return false;
666 }
667 
668 static inline int psz_kmsg_erase(struct psz_context *cxt,
669 		struct pstore_zone *zone, struct pstore_record *record)
670 {
671 	struct psz_buffer *buffer = zone->buffer;
672 	struct psz_kmsg_header *hdr =
673 		(struct psz_kmsg_header *)buffer->data;
674 	size_t size;
675 
676 	if (unlikely(!psz_ok(zone)))
677 		return 0;
678 
679 	/* this zone is already updated, no need to erase */
680 	if (record->count != hdr->counter)
681 		return 0;
682 
683 	size = buffer_datalen(zone) + sizeof(*zone->buffer);
684 	atomic_set(&zone->buffer->datalen, 0);
685 	if (cxt->pstore_zone_info->erase)
686 		return cxt->pstore_zone_info->erase(size, zone->off);
687 	else
688 		return psz_zone_write(zone, FLUSH_META, NULL, 0, 0);
689 }
690 
691 static inline int psz_record_erase(struct psz_context *cxt,
692 		struct pstore_zone *zone)
693 {
694 	if (unlikely(!psz_old_ok(zone)))
695 		return 0;
696 
697 	kfree(zone->oldbuf);
698 	zone->oldbuf = NULL;
699 	/*
700 	 * If there is new data in the zone buffer, the old data is already
701 	 * invalid, so there is no need to flush zeroes (erase) to the
702 	 * block device.
703 	 */
704 	if (!buffer_datalen(zone))
705 		return psz_zone_write(zone, FLUSH_META, NULL, 0, 0);
706 	psz_flush_dirty_zone(zone);
707 	return 0;
708 }
709 
710 static int psz_pstore_erase(struct pstore_record *record)
711 {
712 	struct psz_context *cxt = record->psi->data;
713 
714 	switch (record->type) {
715 	case PSTORE_TYPE_DMESG:
716 		if (record->id >= cxt->kmsg_max_cnt)
717 			return -EINVAL;
718 		return psz_kmsg_erase(cxt, cxt->kpszs[record->id], record);
719 	case PSTORE_TYPE_PMSG:
720 		return psz_record_erase(cxt, cxt->ppsz);
721 	case PSTORE_TYPE_CONSOLE:
722 		return psz_record_erase(cxt, cxt->cpsz);
723 	case PSTORE_TYPE_FTRACE:
724 		if (record->id >= cxt->ftrace_max_cnt)
725 			return -EINVAL;
726 		return psz_record_erase(cxt, cxt->fpszs[record->id]);
727 	case PSTORE_TYPE_BLACKBOX:
728 		return psz_record_erase(cxt, cxt->bpsz);
729 	default: return -EINVAL;
730 	}
731 }
732 
733 static void psz_write_kmsg_hdr(struct pstore_zone *zone,
734 		struct pstore_record *record)
735 {
736 	struct psz_context *cxt = record->psi->data;
737 	struct psz_buffer *buffer = zone->buffer;
738 	struct psz_kmsg_header *hdr =
739 		(struct psz_kmsg_header *)buffer->data;
740 
741 	hdr->magic = PSTORE_KMSG_HEADER_MAGIC;
742 	hdr->compressed = record->compressed;
743 	hdr->time.tv_sec = record->time.tv_sec;
744 	hdr->time.tv_nsec = record->time.tv_nsec;
745 	hdr->reason = record->reason;
746 	if (hdr->reason == KMSG_DUMP_OOPS)
747 		hdr->counter = ++cxt->oops_counter;
748 	else if (hdr->reason == KMSG_DUMP_PANIC)
749 		hdr->counter = ++cxt->panic_counter;
750 	else
751 		hdr->counter = 0;
752 }
753 
754 /*
755  * In case a zone is broken, which may happen on an MTD device, try each
756  * zone in turn, starting at cxt->kmsg_write_cnt.
757  */
758 static inline int notrace psz_kmsg_write_record(struct psz_context *cxt,
759 		struct pstore_record *record)
760 {
761 	size_t size, hlen;
762 	struct pstore_zone *zone;
763 	unsigned int i;
764 
765 	for (i = 0; i < cxt->kmsg_max_cnt; i++) {
766 		unsigned int zonenum, len;
767 		int ret;
768 
769 		zonenum = (cxt->kmsg_write_cnt + i) % cxt->kmsg_max_cnt;
770 		zone = cxt->kpszs[zonenum];
771 		if (unlikely(!zone))
772 			return -ENOSPC;
773 
774 		/* avoid destroying old data, allocate a new buffer */
775 		len = zone->buffer_size + sizeof(*zone->buffer);
776 		zone->oldbuf = zone->buffer;
777 		zone->buffer = kzalloc(len, GFP_ATOMIC);
778 		if (!zone->buffer) {
779 			zone->buffer = zone->oldbuf;
780 			return -ENOMEM;
781 		}
782 		zone->buffer->sig = zone->oldbuf->sig;
783 
784 		pr_debug("write %s to zone id %d\n", zone->name, zonenum);
785 		psz_write_kmsg_hdr(zone, record);
786 		hlen = sizeof(struct psz_kmsg_header);
787 		size = min_t(size_t, record->size, zone->buffer_size - hlen);
788 		ret = psz_zone_write(zone, FLUSH_ALL, record->buf, size, hlen);
789 		if (likely(!ret || ret != -ENOMSG)) {
790 			cxt->kmsg_write_cnt = zonenum + 1;
791 			cxt->kmsg_write_cnt %= cxt->kmsg_max_cnt;
792 			/* no need to try next zone, free last zone buffer */
793 			kfree(zone->oldbuf);
794 			zone->oldbuf = NULL;
795 			return ret;
796 		}
797 
798 		pr_debug("zone %u may be broken, try next dmesg zone\n",
799 				zonenum);
800 		kfree(zone->buffer);
801 		zone->buffer = zone->oldbuf;
802 		zone->oldbuf = NULL;
803 	}
804 
805 	return -EBUSY;
806 }
807 
808 static int notrace psz_kmsg_write(struct psz_context *cxt,
809 		struct pstore_record *record)
810 {
811 	int ret;
812 
813 	/*
814 	 * Explicitly only take the first part of any new crash.
815 	 * If our buffer is larger than kmsg_bytes, this can never happen,
816 	 * and if our buffer is smaller than kmsg_bytes, we don't want the
817 	 * report split across multiple records.
818 	 */
819 	if (record->part != 1)
820 		return -ENOSPC;
821 
822 	if (!cxt->kpszs)
823 		return -ENOSPC;
824 
825 	ret = psz_kmsg_write_record(cxt, record);
826 	if (!ret && is_on_panic()) {
827 		/* ensure all data is flushed to storage on panic */
828 		pr_debug("try to flush other dirty zones\n");
829 		psz_flush_all_dirty_zones(NULL);
830 	}
831 
832 	/* always return 0 since the record has been handled in the buffer */
833 	return 0;
834 }
835 
836 static int notrace psz_record_write(struct pstore_zone *zone,
837 		struct pstore_record *record)
838 {
839 	size_t start, rem;
840 	bool is_full_data = false;
841 	char *buf;
842 	int cnt;
843 
844 	if (!zone || !record)
845 		return -ENOSPC;
846 
847 	if (atomic_read(&zone->buffer->datalen) >= zone->buffer_size)
848 		is_full_data = true;
849 
850 	cnt = record->size;
851 	buf = record->buf;
852 	if (unlikely(cnt > zone->buffer_size)) {
853 		buf += cnt - zone->buffer_size;
854 		cnt = zone->buffer_size;
855 	}
856 
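	/*
	 * The zone is used as a ring buffer: fill the space remaining after
	 * @start first, then wrap around and continue from the beginning.
	 */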
857 	start = buffer_start(zone);
858 	rem = zone->buffer_size - start;
859 	if (unlikely(rem < cnt)) {
860 		psz_zone_write(zone, FLUSH_PART, buf, rem, start);
861 		buf += rem;
862 		cnt -= rem;
863 		start = 0;
864 		is_full_data = true;
865 	}
866 
867 	atomic_set(&zone->buffer->start, cnt + start);
868 	psz_zone_write(zone, FLUSH_PART, buf, cnt, start);
869 
870 	/*
871 	 * psz_zone_write() sets datalen to start + cnt, which is correct as
872 	 * long as the actual data length is less than the buffer size. Once
873 	 * the data length exceeds the buffer size, writes wrap around to the
874 	 * beginning of the zone, which would leave buffer->datalen wrong.
875 	 * So reset datalen to the buffer size whenever the actual data
876 	 * length exceeds the buffer size.
877 	 */
878 	if (is_full_data) {
879 		atomic_set(&zone->buffer->datalen, zone->buffer_size);
880 		psz_zone_write(zone, FLUSH_META, NULL, 0, 0);
881 	}
882 	return 0;
883 }
884 
885 static int notrace psz_pstore_write(struct pstore_record *record)
886 {
887 	struct psz_context *cxt = record->psi->data;
888 
889 	if (record->type == PSTORE_TYPE_DMESG &&
890 			record->reason == KMSG_DUMP_PANIC)
891 		atomic_set(&cxt->on_panic, 1);
892 
893 	/*
894 	 * If on panic, do not write anything except panic records. This avoids
895 	 * the case where panic_write prints a log that wakes up the console backend.
896 	 */
897 	if (is_on_panic() && record->type != PSTORE_TYPE_DMESG)
898 		return -EBUSY;
899 
900 	switch (record->type) {
901 	case PSTORE_TYPE_DMESG:
902 		return psz_kmsg_write(cxt, record);
903 	case PSTORE_TYPE_CONSOLE:
904 		return psz_record_write(cxt->cpsz, record);
905 	case PSTORE_TYPE_PMSG:
906 		return psz_record_write(cxt->ppsz, record);
907 	case PSTORE_TYPE_FTRACE: {
908 		int zonenum = smp_processor_id();
909 
910 		if (!cxt->fpszs)
911 			return -ENOSPC;
912 		return psz_record_write(cxt->fpszs[zonenum], record);
913 	}
914 	case PSTORE_TYPE_BLACKBOX:
915 		return psz_record_write(cxt->bpsz, record);
916 	default:
917 		return -EINVAL;
918 	}
919 }
920 
921 static struct pstore_zone *psz_read_next_zone(struct psz_context *cxt)
922 {
923 	struct pstore_zone *zone = NULL;
924 
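	/*
	 * Records are handed to pstore in a fixed order: all kmsg dump zones
	 * first, then the ftrace zones, then the pmsg, console and blackbox
	 * zones.
	 */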
925 	while (cxt->kmsg_read_cnt < cxt->kmsg_max_cnt) {
926 		zone = cxt->kpszs[cxt->kmsg_read_cnt++];
927 		if (psz_ok(zone))
928 			return zone;
929 	}
930 
931 	if (cxt->ftrace_read_cnt < cxt->ftrace_max_cnt)
932 		/*
933 		 * No need to check psz_old_ok() here; psz_ftrace_read() does so
934 		 * while combining zones, and it must traverse all zones in case
935 		 * some of them hold no data.
936 		 */
937 		return cxt->fpszs[cxt->ftrace_read_cnt++];
938 
939 	if (cxt->pmsg_read_cnt == 0) {
940 		cxt->pmsg_read_cnt++;
941 		zone = cxt->ppsz;
942 		if (psz_old_ok(zone))
943 			return zone;
944 	}
945 
946 	if (cxt->console_read_cnt == 0) {
947 		cxt->console_read_cnt++;
948 		zone = cxt->cpsz;
949 		if (psz_old_ok(zone))
950 			return zone;
951 	}
952 
953 	if (cxt->blackbox_read_cnt == 0) {
954 		cxt->blackbox_read_cnt++;
955 		zone = cxt->bpsz;
956 		if (psz_old_ok(zone))
957 			return zone;
958 	}
959 
960 	return NULL;
961 }
962 
963 static int psz_kmsg_read_hdr(struct pstore_zone *zone,
964 		struct pstore_record *record)
965 {
966 	struct psz_buffer *buffer = zone->buffer;
967 	struct psz_kmsg_header *hdr =
968 		(struct psz_kmsg_header *)buffer->data;
969 
970 	if (hdr->magic != PSTORE_KMSG_HEADER_MAGIC)
971 		return -EINVAL;
972 	record->compressed = hdr->compressed;
973 	record->time.tv_sec = hdr->time.tv_sec;
974 	record->time.tv_nsec = hdr->time.tv_nsec;
975 	record->reason = hdr->reason;
976 	record->count = hdr->counter;
977 	return 0;
978 }
979 
980 static ssize_t psz_kmsg_read(struct pstore_zone *zone,
981 		struct pstore_record *record)
982 {
983 	ssize_t size, hlen = 0;
984 
985 	size = buffer_datalen(zone);
986 	/* Clear and skip this kmsg dump record if it has no valid header */
987 	if (psz_kmsg_read_hdr(zone, record)) {
988 		atomic_set(&zone->buffer->datalen, 0);
989 		atomic_set(&zone->dirty, 0);
990 		return -ENOMSG;
991 	}
992 	size -= sizeof(struct psz_kmsg_header);
993 
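	/* for uncompressed records, prepend a human-readable header line */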
994 	if (!record->compressed) {
995 		char *buf = kasprintf(GFP_KERNEL, "%s: Total %d times\n",
996 				      kmsg_dump_reason_str(record->reason),
997 				      record->count);
998 		hlen = strlen(buf);
999 		record->buf = krealloc(buf, hlen + size, GFP_KERNEL);
1000 		if (!record->buf) {
1001 			kfree(buf);
1002 			return -ENOMEM;
1003 		}
1004 	} else {
1005 		record->buf = kmalloc(size, GFP_KERNEL);
1006 		if (!record->buf)
1007 			return -ENOMEM;
1008 	}
1009 
1010 	size = psz_zone_read_buffer(zone, record->buf + hlen, size,
1011 			sizeof(struct psz_kmsg_header));
1012 	if (unlikely(size < 0)) {
1013 		kfree(record->buf);
1014 		return -ENOMSG;
1015 	}
1016 
1017 	return size + hlen;
1018 }
1019 
1020 /* try to combine all ftrace zones */
1021 static ssize_t psz_ftrace_read(struct pstore_zone *zone,
1022 		struct pstore_record *record)
1023 {
1024 	struct psz_context *cxt;
1025 	struct psz_buffer *buf;
1026 	int ret;
1027 
1028 	if (!zone || !record)
1029 		return -ENOSPC;
1030 
1031 	if (!psz_old_ok(zone))
1032 		goto out;
1033 
1034 	buf = (struct psz_buffer *)zone->oldbuf;
1035 	if (!buf)
1036 		return -ENOMSG;
1037 
1038 	ret = pstore_ftrace_combine_log(&record->buf, &record->size,
1039 			(char *)buf->data, atomic_read(&buf->datalen));
1040 	if (unlikely(ret))
1041 		return ret;
1042 
1043 out:
1044 	cxt = record->psi->data;
1045 	if (cxt->ftrace_read_cnt < cxt->ftrace_max_cnt)
1046 		/* then, read next ftrace zone */
1047 		return -ENOMSG;
1048 	record->id = 0;
1049 	return record->size ? record->size : -ENOMSG;
1050 }
1051 
1052 static ssize_t psz_record_read(struct pstore_zone *zone,
1053 		struct pstore_record *record)
1054 {
1055 	size_t len;
1056 	struct psz_buffer *buf;
1057 
1058 	if (!zone || !record)
1059 		return -ENOSPC;
1060 
1061 	buf = (struct psz_buffer *)zone->oldbuf;
1062 	if (!buf)
1063 		return -ENOMSG;
1064 
1065 	len = atomic_read(&buf->datalen);
1066 	record->buf = kmalloc(len, GFP_KERNEL);
1067 	if (!record->buf)
1068 		return -ENOMEM;
1069 
1070 	if (unlikely(psz_zone_read_oldbuf(zone, record->buf, len, 0))) {
1071 		kfree(record->buf);
1072 		return -ENOMSG;
1073 	}
1074 
1075 	return len;
1076 }
1077 
1078 static ssize_t psz_pstore_read(struct pstore_record *record)
1079 {
1080 	struct psz_context *cxt = record->psi->data;
1081 	ssize_t (*readop)(struct pstore_zone *zone,
1082 			struct pstore_record *record);
1083 	struct pstore_zone *zone;
1084 	ssize_t ret;
1085 
1086 	/* before reading, we must recover data from storage */
1087 	ret = psz_recovery(cxt);
1088 	if (ret)
1089 		return ret;
1090 
1091 next_zone:
1092 	zone = psz_read_next_zone(cxt);
1093 	if (!zone)
1094 		return 0;
1095 
1096 	record->type = zone->type;
1097 	switch (record->type) {
1098 	case PSTORE_TYPE_DMESG:
1099 		readop = psz_kmsg_read;
1100 		record->id = cxt->kmsg_read_cnt - 1;
1101 		break;
1102 	case PSTORE_TYPE_FTRACE:
1103 		readop = psz_ftrace_read;
1104 		break;
1105 	case PSTORE_TYPE_CONSOLE:
1106 	case PSTORE_TYPE_PMSG:
1107 	case PSTORE_TYPE_BLACKBOX:
1108 		readop = psz_record_read;
1109 		break;
1110 	default:
1111 		goto next_zone;
1112 	}
1113 
1114 	ret = readop(zone, record);
1115 	if (ret == -ENOMSG)
1116 		goto next_zone;
1117 	return ret;
1118 }
1119 
1120 static struct psz_context pstore_zone_cxt = {
1121 	.pstore_zone_info_lock =
1122 		__MUTEX_INITIALIZER(pstore_zone_cxt.pstore_zone_info_lock),
1123 	.recovered = ATOMIC_INIT(0),
1124 	.on_panic = ATOMIC_INIT(0),
1125 	.pstore = {
1126 		.owner = THIS_MODULE,
1127 		.open = psz_pstore_open,
1128 		.read = psz_pstore_read,
1129 		.write = psz_pstore_write,
1130 		.erase = psz_pstore_erase,
1131 	},
1132 };
1133 
1134 static void psz_free_zone(struct pstore_zone **pszone)
1135 {
1136 	struct pstore_zone *zone = *pszone;
1137 
1138 	if (!zone)
1139 		return;
1140 
1141 	kfree(zone->buffer);
1142 	kfree(zone);
1143 	*pszone = NULL;
1144 }
1145 
1146 static void psz_free_zones(struct pstore_zone ***pszones, unsigned int *cnt)
1147 {
1148 	struct pstore_zone **zones = *pszones;
1149 
1150 	if (!zones)
1151 		return;
1152 
1153 	while (*cnt > 0) {
1154 		(*cnt)--;
1155 		psz_free_zone(&(zones[*cnt]));
1156 	}
1157 	kfree(zones);
1158 	*pszones = NULL;
1159 }
1160 
1161 static void psz_free_all_zones(struct psz_context *cxt)
1162 {
1163 	if (cxt->kpszs)
1164 		psz_free_zones(&cxt->kpszs, &cxt->kmsg_max_cnt);
1165 	if (cxt->ppsz)
1166 		psz_free_zone(&cxt->ppsz);
1167 	if (cxt->cpsz)
1168 		psz_free_zone(&cxt->cpsz);
1169 	if (cxt->fpszs)
1170 		psz_free_zones(&cxt->fpszs, &cxt->ftrace_max_cnt);
1171 	if (cxt->bpsz)
1172 		psz_free_zone(&cxt->bpsz);
1173 }
1174 
1175 static struct pstore_zone *psz_init_zone(enum pstore_type_id type,
1176 		loff_t *off, size_t size)
1177 {
1178 	struct pstore_zone_info *info = pstore_zone_cxt.pstore_zone_info;
1179 	struct pstore_zone *zone;
1180 	const char *name = pstore_type_to_name(type);
1181 
1182 	if (!size)
1183 		return NULL;
1184 
1185 	if (*off + size > info->total_size) {
1186 		pr_err("no room for %s (0x%zx@0x%llx over 0x%lx)\n",
1187 			name, size, *off, info->total_size);
1188 		return ERR_PTR(-ENOMEM);
1189 	}
1190 
1191 	zone = kzalloc(sizeof(struct pstore_zone), GFP_KERNEL);
1192 	if (!zone)
1193 		return ERR_PTR(-ENOMEM);
1194 
1195 	zone->buffer = kmalloc(size, GFP_KERNEL);
1196 	if (!zone->buffer) {
1197 		kfree(zone);
1198 		return ERR_PTR(-ENOMEM);
1199 	}
1200 	memset(zone->buffer, 0xFF, size);
1201 	zone->off = *off;
1202 	zone->name = name;
1203 	zone->type = type;
1204 	zone->buffer_size = size - sizeof(struct psz_buffer);
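	/* per-type signature so recovery can validate this zone's stored header */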
1205 	zone->buffer->sig = type ^ PSZ_SIG;
1206 	zone->oldbuf = NULL;
1207 	atomic_set(&zone->dirty, 0);
1208 	atomic_set(&zone->buffer->datalen, 0);
1209 	atomic_set(&zone->buffer->start, 0);
1210 
1211 	*off += size;
1212 
1213 	pr_debug("pszone %s: off 0x%llx, %zu header, %zu data\n", zone->name,
1214 			zone->off, sizeof(*zone->buffer), zone->buffer_size);
1215 	return zone;
1216 }
1217 
1218 static struct pstore_zone **psz_init_zones(enum pstore_type_id type,
1219 	loff_t *off, size_t total_size, ssize_t record_size,
1220 	unsigned int *cnt)
1221 {
1222 	struct pstore_zone_info *info = pstore_zone_cxt.pstore_zone_info;
1223 	struct pstore_zone **zones, *zone;
1224 	const char *name = pstore_type_to_name(type);
1225 	int c, i;
1226 
1227 	*cnt = 0;
1228 	if (!total_size || !record_size)
1229 		return NULL;
1230 
1231 	if (*off + total_size > info->total_size) {
1232 		pr_err("no room for zones %s (0x%zx@0x%llx over 0x%lx)\n",
1233 			name, total_size, *off, info->total_size);
1234 		return ERR_PTR(-ENOMEM);
1235 	}
1236 
1237 	c = total_size / record_size;
1238 	zones = kcalloc(c, sizeof(*zones), GFP_KERNEL);
1239 	if (!zones) {
1240 		pr_err("allocate for zones %s failed\n", name);
1241 		return ERR_PTR(-ENOMEM);
1242 	}
1243 	memset(zones, 0, c * sizeof(*zones));
1244 
1245 	for (i = 0; i < c; i++) {
1246 		zone = psz_init_zone(type, off, record_size);
1247 		if (!zone || IS_ERR(zone)) {
1248 			pr_err("initialize zones %s failed\n", name);
1249 			psz_free_zones(&zones, &i);
1250 			return (void *)zone;
1251 		}
1252 		zones[i] = zone;
1253 	}
1254 
1255 	*cnt = c;
1256 	return zones;
1257 }
1258 
1259 static int psz_alloc_zones(struct psz_context *cxt)
1260 {
1261 	struct pstore_zone_info *info = cxt->pstore_zone_info;
1262 	loff_t off = 0;
1263 	int err;
1264 	size_t off_size = 0;
1265 
1266 	off_size += info->pmsg_size;
1267 	cxt->ppsz = psz_init_zone(PSTORE_TYPE_PMSG, &off, info->pmsg_size);
1268 	if (IS_ERR(cxt->ppsz)) {
1269 		err = PTR_ERR(cxt->ppsz);
1270 		cxt->ppsz = NULL;
1271 		goto free_out;
1272 	}
1273 
1274 	off_size += info->console_size;
1275 	cxt->cpsz = psz_init_zone(PSTORE_TYPE_CONSOLE, &off,
1276 			info->console_size);
1277 	if (IS_ERR(cxt->cpsz)) {
1278 		err = PTR_ERR(cxt->cpsz);
1279 		cxt->cpsz = NULL;
1280 		goto free_out;
1281 	}
1282 
1283 	off_size += info->ftrace_size;
1284 	cxt->fpszs = psz_init_zones(PSTORE_TYPE_FTRACE, &off,
1285 			info->ftrace_size,
1286 			info->ftrace_size / nr_cpu_ids,
1287 			&cxt->ftrace_max_cnt);
1288 	if (IS_ERR(cxt->fpszs)) {
1289 		err = PTR_ERR(cxt->fpszs);
1290 		cxt->fpszs = NULL;
1291 		goto free_out;
1292 	}
1293 
1294 	off_size += info->blackbox_size;
1295 	cxt->bpsz = psz_init_zone(PSTORE_TYPE_BLACKBOX, &off,
1296 			info->blackbox_size);
1297 	if (IS_ERR(cxt->bpsz)) {
1298 		err = PTR_ERR(cxt->bpsz);
1299 		cxt->bpsz = NULL;
1300 		goto free_out;
1301 	}
1302 
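	/* kmsg dump zones take whatever space is left after the other zones */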
1303 	cxt->kpszs = psz_init_zones(PSTORE_TYPE_DMESG, &off,
1304 			info->total_size - off_size,
1305 			info->kmsg_size, &cxt->kmsg_max_cnt);
1306 	if (IS_ERR(cxt->kpszs)) {
1307 		err = PTR_ERR(cxt->kpszs);
1308 		cxt->kpszs = NULL;
1309 		goto free_out;
1310 	}
1311 
1312 	return 0;
1313 free_out:
1314 	psz_free_all_zones(cxt);
1315 	return err;
1316 }
1317 
1318 /**
1319  * register_pstore_zone() - register with pstore/zone
1320  *
1321  * @info: back-end driver information. See &struct pstore_zone_info.
1322  *
1323  * Only one back-end can be registered at a time.
1324  *
1325  * Return: 0 on success, others on failure.
1326  */
1327 int register_pstore_zone(struct pstore_zone_info *info)
1328 {
1329 	int err = -EINVAL;
1330 	struct psz_context *cxt = &pstore_zone_cxt;
1331 
1332 	if (info->total_size < 4096) {
1333 		pr_warn("total_size must be >= 4096\n");
1334 		return -EINVAL;
1335 	}
1336 
1337 	if (!info->kmsg_size && !info->pmsg_size && !info->console_size &&
1338 	    !info->ftrace_size && !info->blackbox_size) {
1339 		pr_warn("at least one record size must be non-zero\n");
1340 		return -EINVAL;
1341 	}
1342 
1343 	if (!info->name || !info->name[0])
1344 		return -EINVAL;
1345 
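	/*
	 * Each size must be zero or a multiple of @size (a power of two),
	 * and at least @size when non-zero.
	 */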
1346 #define check_size(name, size) {					\
1347 		if (info->name > 0 && info->name < (size)) {		\
1348 			pr_err(#name " must be over %d\n", (size));	\
1349 			return -EINVAL;					\
1350 		}							\
1351 		if (info->name & (size - 1)) {				\
1352 			pr_err(#name " must be a multiple of %d\n",	\
1353 					(size));			\
1354 			return -EINVAL;					\
1355 		}							\
1356 	}
1357 
1358 	check_size(total_size, 4096);
1359 	check_size(kmsg_size, SECTOR_SIZE);
1360 	check_size(pmsg_size, SECTOR_SIZE);
1361 	check_size(console_size, SECTOR_SIZE);
1362 	check_size(ftrace_size, SECTOR_SIZE);
1363 	check_size(blackbox_size, SECTOR_SIZE);
1364 
1365 #undef check_size
1366 
1367 	/*
1368 	 * Both @read and @write must be provided.
1369 	 * Without @read, mounting pstore may fail.
1370 	 * Without @write, pstore cannot remove record files.
1371 	 */
1372 	if (!info->read || !info->write) {
1373 		pr_err("no valid general read/write interface\n");
1374 		return -EINVAL;
1375 	}
1376 
1377 	mutex_lock(&cxt->pstore_zone_info_lock);
1378 	if (cxt->pstore_zone_info) {
1379 		pr_warn("'%s' already loaded: ignoring '%s'\n",
1380 				cxt->pstore_zone_info->name, info->name);
1381 		mutex_unlock(&cxt->pstore_zone_info_lock);
1382 		return -EBUSY;
1383 	}
1384 	cxt->pstore_zone_info = info;
1385 
1386 	pr_debug("register %s with properties:\n", info->name);
1387 	pr_debug("\ttotal size : %ld Bytes\n", info->total_size);
1388 	pr_debug("\tkmsg size : %ld Bytes\n", info->kmsg_size);
1389 	pr_debug("\tpmsg size : %ld Bytes\n", info->pmsg_size);
1390 	pr_debug("\tconsole size : %ld Bytes\n", info->console_size);
1391 	pr_debug("\tftrace size : %ld Bytes\n", info->ftrace_size);
1392 	pr_debug("\tblackbox size : %ld Bytes\n", info->blackbox_size);
1393 
1394 	err = psz_alloc_zones(cxt);
1395 	if (err) {
1396 		pr_err("alloc zones failed\n");
1397 		goto fail_out;
1398 	}
1399 
1400 	if (info->kmsg_size) {
1401 		cxt->pstore.bufsize = cxt->kpszs[0]->buffer_size -
1402 			sizeof(struct psz_kmsg_header);
1403 		cxt->pstore.buf = kzalloc(cxt->pstore.bufsize, GFP_KERNEL);
1404 		if (!cxt->pstore.buf) {
1405 			err = -ENOMEM;
1406 			goto fail_free;
1407 		}
1408 	}
1409 	cxt->pstore.data = cxt;
1410 
1411 	pr_info("registered %s as backend for", info->name);
1412 	cxt->pstore.max_reason = info->max_reason;
1413 	cxt->pstore.name = info->name;
1414 	if (info->kmsg_size) {
1415 		cxt->pstore.flags |= PSTORE_FLAGS_DMESG;
1416 		pr_cont(" kmsg(%s",
1417 			kmsg_dump_reason_str(cxt->pstore.max_reason));
1418 		if (cxt->pstore_zone_info->panic_write)
1419 			pr_cont(",panic_write");
1420 		pr_cont(")");
1421 	}
1422 	if (info->pmsg_size) {
1423 		cxt->pstore.flags |= PSTORE_FLAGS_PMSG;
1424 		pr_cont(" pmsg");
1425 	}
1426 	if (info->console_size) {
1427 		cxt->pstore.flags |= PSTORE_FLAGS_CONSOLE;
1428 		pr_cont(" console");
1429 	}
1430 	if (info->ftrace_size) {
1431 		cxt->pstore.flags |= PSTORE_FLAGS_FTRACE;
1432 		pr_cont(" ftrace");
1433 	}
1434 	if (info->blackbox_size) {
1435 		cxt->pstore.flags |= PSTORE_FLAGS_BLACKBOX;
1436 		pr_cont(" blackbox");
1437 	}
1438 	pr_cont("\n");
1439 
1440 	err = pstore_register(&cxt->pstore);
1441 	if (err) {
1442 		pr_err("registering with pstore failed\n");
1443 		goto fail_free;
1444 	}
1445 	mutex_unlock(&pstore_zone_cxt.pstore_zone_info_lock);
1446 
1447 	return 0;
1448 
1449 fail_free:
1450 	kfree(cxt->pstore.buf);
1451 	cxt->pstore.buf = NULL;
1452 	cxt->pstore.bufsize = 0;
1453 	psz_free_all_zones(cxt);
1454 fail_out:
1455 	pstore_zone_cxt.pstore_zone_info = NULL;
1456 	mutex_unlock(&pstore_zone_cxt.pstore_zone_info_lock);
1457 	return err;
1458 }
1459 EXPORT_SYMBOL_GPL(register_pstore_zone);
1460 
1461 /**
1462  * unregister_pstore_zone() - unregister from pstore/zone
1463  *
1464  * @info: back-end driver information. See struct pstore_zone_info.
1465  */
1466 void unregister_pstore_zone(struct pstore_zone_info *info)
1467 {
1468 	struct psz_context *cxt = &pstore_zone_cxt;
1469 
1470 	mutex_lock(&cxt->pstore_zone_info_lock);
1471 	if (!cxt->pstore_zone_info) {
1472 		mutex_unlock(&cxt->pstore_zone_info_lock);
1473 		return;
1474 	}
1475 
1476 	/* Stop incoming writes from pstore. */
1477 	pstore_unregister(&cxt->pstore);
1478 
1479 	/* Flush any pending writes. */
1480 	psz_flush_all_dirty_zones(NULL);
1481 	flush_delayed_work(&psz_cleaner);
1482 
1483 	/* Clean up allocations. */
1484 	kfree(cxt->pstore.buf);
1485 	cxt->pstore.buf = NULL;
1486 	cxt->pstore.bufsize = 0;
1487 	cxt->pstore_zone_info = NULL;
1488 
1489 	psz_free_all_zones(cxt);
1490 
1491 	/* Clear counters and zone state. */
1492 	cxt->oops_counter = 0;
1493 	cxt->panic_counter = 0;
1494 	atomic_set(&cxt->recovered, 0);
1495 	atomic_set(&cxt->on_panic, 0);
1496 
1497 	mutex_unlock(&cxt->pstore_zone_info_lock);
1498 }
1499 EXPORT_SYMBOL_GPL(unregister_pstore_zone);
1500 
1501 MODULE_LICENSE("GPL");
1502 MODULE_AUTHOR("WeiXiong Liao <liaoweixiong@allwinnertech.com>");
1503 MODULE_AUTHOR("Kees Cook <keescook@chromium.org>");
1504 MODULE_DESCRIPTION("Storage Manager for pstore/blk");
1505