1 /*
2  * Firmware Assisted dump: A robust mechanism to get reliable kernel crash
3  * dump with assistance from firmware. This approach does not use kexec,
4  * instead firmware assists in booting the kdump kernel while preserving
5  * memory contents. Most of the code implementation has been adapted
6  * from the phyp-assisted dump implementation written by Linas Vepstas and
7  * Manish Ahuja.
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program; if not, write to the Free Software
21  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22  *
23  * Copyright 2011 IBM Corporation
24  * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
25  */
26 
27 #undef DEBUG
28 #define pr_fmt(fmt) "fadump: " fmt
29 
30 #include <linux/string.h>
31 #include <linux/memblock.h>
32 #include <linux/delay.h>
33 #include <linux/seq_file.h>
34 #include <linux/crash_dump.h>
35 #include <linux/kobject.h>
36 #include <linux/sysfs.h>
37 
38 #include <asm/debugfs.h>
39 #include <asm/page.h>
40 #include <asm/prom.h>
41 #include <asm/rtas.h>
42 #include <asm/fadump.h>
43 #include <asm/setup.h>
44 
45 static struct fw_dump fw_dump;
46 static struct fadump_mem_struct fdm;
47 static const struct fadump_mem_struct *fdm_active;
48 
49 static DEFINE_MUTEX(fadump_mutex);
50 struct fad_crash_memory_ranges *crash_memory_ranges;
51 int crash_memory_ranges_size;
52 int crash_mem_ranges;
53 int max_crash_mem_ranges;
54 
55 /* Scan the Firmware Assisted dump configuration details. */
56 int __init early_init_dt_scan_fw_dump(unsigned long node,
57 			const char *uname, int depth, void *data)
58 {
59 	const __be32 *sections;
60 	int i, num_sections;
61 	int size;
62 	const __be32 *token;
63 
64 	if (depth != 1 || strcmp(uname, "rtas") != 0)
65 		return 0;
66 
67 	/*
68 	 * Check if Firmware Assisted dump is supported. If yes, check
69 	 * whether a dump was initiated on the last reboot.
70 	 */
71 	token = of_get_flat_dt_prop(node, "ibm,configure-kernel-dump", NULL);
72 	if (!token)
73 		return 1;
74 
75 	fw_dump.fadump_supported = 1;
76 	fw_dump.ibm_configure_kernel_dump = be32_to_cpu(*token);
77 
78 	/*
79 	 * The 'ibm,kernel-dump' rtas node is present only if there is
80 	 * dump data waiting for us.
81 	 */
82 	fdm_active = of_get_flat_dt_prop(node, "ibm,kernel-dump", NULL);
83 	if (fdm_active)
84 		fw_dump.dump_active = 1;
85 
86 	/* Get the sizes required to store dump data for the firmware-provided
87 	 * dump sections.
88 	 * For each supported dump section type there is a 32-bit cell that
89 	 * defines the ID of the section, followed by two 32-bit cells that
90 	 * give the size of the section in bytes.
91 	 */
92 	sections = of_get_flat_dt_prop(node, "ibm,configure-kernel-dump-sizes",
93 					&size);
94 
95 	if (!sections)
96 		return 1;
97 
98 	num_sections = size / (3 * sizeof(u32));
99 
100 	for (i = 0; i < num_sections; i++, sections += 3) {
101 		u32 type = (u32)of_read_number(sections, 1);
102 
103 		switch (type) {
104 		case FADUMP_CPU_STATE_DATA:
105 			fw_dump.cpu_state_data_size =
106 					of_read_ulong(&sections[1], 2);
107 			break;
108 		case FADUMP_HPTE_REGION:
109 			fw_dump.hpte_region_size =
110 					of_read_ulong(&sections[1], 2);
111 			break;
112 		}
113 	}
114 
115 	return 1;
116 }
117 
118 /*
119  * If fadump is registered, check if the memory provided
120  * falls within the boot memory area or the reserved dump area.
121  */
122 int is_fadump_memory_area(u64 addr, ulong size)
123 {
124 	u64 d_start = fw_dump.reserve_dump_area_start;
125 	u64 d_end = d_start + fw_dump.reserve_dump_area_size;
126 
127 	if (!fw_dump.dump_registered)
128 		return 0;
129 
130 	if (((addr + size) > d_start) && (addr <= d_end))
131 		return 1;
132 
133 	return (addr + size) > RMA_START && addr <= fw_dump.boot_memory_size;
134 }
135 
136 int should_fadump_crash(void)
137 {
138 	if (!fw_dump.dump_registered || !fw_dump.fadumphdr_addr)
139 		return 0;
140 	return 1;
141 }
142 
143 int is_fadump_active(void)
144 {
145 	return fw_dump.dump_active;
146 }
147 
148 /*
149  * Returns 1, if there are no holes in boot memory area,
150  * 0 otherwise.
151  */
152 static int is_boot_memory_area_contiguous(void)
153 {
154 	struct memblock_region *reg;
155 	unsigned long tstart, tend;
156 	unsigned long start_pfn = PHYS_PFN(RMA_START);
157 	unsigned long end_pfn = PHYS_PFN(RMA_START + fw_dump.boot_memory_size);
158 	unsigned int ret = 0;
159 
160 	for_each_memblock(memory, reg) {
161 		tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
162 		tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
163 		if (tstart < tend) {
164 			/* Memory hole from start_pfn to tstart */
165 			if (tstart > start_pfn)
166 				break;
167 
168 			if (tend == end_pfn) {
169 				ret = 1;
170 				break;
171 			}
172 
173 			start_pfn = tend + 1;
174 		}
175 	}
176 
177 	return ret;
178 }
179 
180 /* Print firmware assisted dump configurations for debugging purpose. */
181 static void fadump_show_config(void)
182 {
183 	pr_debug("Support for firmware-assisted dump (fadump): %s\n",
184 			(fw_dump.fadump_supported ? "present" : "no support"));
185 
186 	if (!fw_dump.fadump_supported)
187 		return;
188 
189 	pr_debug("Fadump enabled    : %s\n",
190 				(fw_dump.fadump_enabled ? "yes" : "no"));
191 	pr_debug("Dump Active       : %s\n",
192 				(fw_dump.dump_active ? "yes" : "no"));
193 	pr_debug("Dump section sizes:\n");
194 	pr_debug("    CPU state data size: %lx\n", fw_dump.cpu_state_data_size);
195 	pr_debug("    HPTE region size   : %lx\n", fw_dump.hpte_region_size);
196 	pr_debug("Boot memory size  : %lx\n", fw_dump.boot_memory_size);
197 }
198 
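/*
 * Initialize the fadump memory structure that is handed to firmware: the
 * header plus the CPU state data, HPTE region and RMA (boot memory) dump
 * sections. The destination areas of the three sections are laid out back
 * to back starting at the page-aligned 'addr'; the address just past the
 * last section is returned.
 */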
199 static unsigned long init_fadump_mem_struct(struct fadump_mem_struct *fdm,
200 				unsigned long addr)
201 {
202 	if (!fdm)
203 		return 0;
204 
205 	memset(fdm, 0, sizeof(struct fadump_mem_struct));
206 	addr = addr & PAGE_MASK;
207 
208 	fdm->header.dump_format_version = cpu_to_be32(0x00000001);
209 	fdm->header.dump_num_sections = cpu_to_be16(3);
210 	fdm->header.dump_status_flag = 0;
211 	fdm->header.offset_first_dump_section =
212 		cpu_to_be32((u32)offsetof(struct fadump_mem_struct, cpu_state_data));
213 
214 	/*
215 	 * Fields for disk dump option.
216 	 * We are not using disk dump option, hence set these fields to 0.
217 	 */
218 	fdm->header.dd_block_size = 0;
219 	fdm->header.dd_block_offset = 0;
220 	fdm->header.dd_num_blocks = 0;
221 	fdm->header.dd_offset_disk_path = 0;
222 
223 	/* set 0 to disable an automatic dump-reboot. */
224 	fdm->header.max_time_auto = 0;
225 
226 	/* Kernel dump sections */
227 	/* cpu state data section. */
228 	fdm->cpu_state_data.request_flag = cpu_to_be32(FADUMP_REQUEST_FLAG);
229 	fdm->cpu_state_data.source_data_type = cpu_to_be16(FADUMP_CPU_STATE_DATA);
230 	fdm->cpu_state_data.source_address = 0;
231 	fdm->cpu_state_data.source_len = cpu_to_be64(fw_dump.cpu_state_data_size);
232 	fdm->cpu_state_data.destination_address = cpu_to_be64(addr);
233 	addr += fw_dump.cpu_state_data_size;
234 
235 	/* hpte region section */
236 	fdm->hpte_region.request_flag = cpu_to_be32(FADUMP_REQUEST_FLAG);
237 	fdm->hpte_region.source_data_type = cpu_to_be16(FADUMP_HPTE_REGION);
238 	fdm->hpte_region.source_address = 0;
239 	fdm->hpte_region.source_len = cpu_to_be64(fw_dump.hpte_region_size);
240 	fdm->hpte_region.destination_address = cpu_to_be64(addr);
241 	addr += fw_dump.hpte_region_size;
242 
243 	/* RMA region section */
244 	fdm->rmr_region.request_flag = cpu_to_be32(FADUMP_REQUEST_FLAG);
245 	fdm->rmr_region.source_data_type = cpu_to_be16(FADUMP_REAL_MODE_REGION);
246 	fdm->rmr_region.source_address = cpu_to_be64(RMA_START);
247 	fdm->rmr_region.source_len = cpu_to_be64(fw_dump.boot_memory_size);
248 	fdm->rmr_region.destination_address = cpu_to_be64(addr);
249 	addr += fw_dump.boot_memory_size;
250 
251 	return addr;
252 }
253 
254 /**
255  * fadump_calculate_reserve_size(): reserve variable boot area 5% of System RAM
256  *
257  * Function to find the largest memory size we need to reserve during early
258  * boot process. This will be the size of the memory that is required for a
259  * kernel to boot successfully.
260  *
261  * This function has been taken from phyp-assisted dump feature implementation.
262  *
263  * returns larger of 256MB or 5% rounded down to multiples of 256MB.
264  *
265  * TODO: Come up with better approach to find out more accurate memory size
266  * that is required for a kernel to boot successfully.
267  *
268  */
269 static inline unsigned long fadump_calculate_reserve_size(void)
270 {
271 	int ret;
272 	unsigned long long base, size;
273 
274 	if (fw_dump.reserve_bootvar)
275 		pr_warn("'fadump_reserve_mem=' parameter is deprecated in favor of 'crashkernel=' parameter.\n");
276 
277 	/*
278 	 * Check if the size is specified through crashkernel= cmdline
279 	 * option. If yes, then use that but ignore base as fadump reserves
280 	 * memory at a predefined offset.
281 	 */
282 	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
283 				&size, &base);
284 	if (ret == 0 && size > 0) {
285 		unsigned long max_size;
286 
287 		if (fw_dump.reserve_bootvar)
288 			pr_info("Using 'crashkernel=' parameter for memory reservation.\n");
289 
290 		fw_dump.reserve_bootvar = (unsigned long)size;
291 
292 		/*
293 		 * Adjust if the boot memory size specified is above
294 		 * the upper limit.
295 		 */
296 		max_size = memblock_phys_mem_size() / MAX_BOOT_MEM_RATIO;
297 		if (fw_dump.reserve_bootvar > max_size) {
298 			fw_dump.reserve_bootvar = max_size;
299 			pr_info("Adjusted boot memory size to %luMB\n",
300 				(fw_dump.reserve_bootvar >> 20));
301 		}
302 
303 		return fw_dump.reserve_bootvar;
304 	} else if (fw_dump.reserve_bootvar) {
305 		/*
306 		 * 'fadump_reserve_mem=' is being used to reserve memory
307 		 * for firmware-assisted dump.
308 		 */
309 		return fw_dump.reserve_bootvar;
310 	}
311 
312 	/* divide by 20 to get 5% of value */
313 	size = memblock_phys_mem_size() / 20;
314 
315 	/* round it down to a multiple of 256MB */
316 	size = size & ~0x0FFFFFFFUL;
317 
318 	/* Truncate to memory_limit. We don't want to over-reserve memory. */
319 	if (memory_limit && size > memory_limit)
320 		size = memory_limit;
321 
322 	return (size > MIN_BOOT_MEM ? size : MIN_BOOT_MEM);
323 }
324 
325 /*
326  * Calculate the total memory size required to be reserved for
327  * firmware-assisted dump registration.
328  */
329 static unsigned long get_fadump_area_size(void)
330 {
331 	unsigned long size = 0;
332 
333 	size += fw_dump.cpu_state_data_size;
334 	size += fw_dump.hpte_region_size;
335 	size += fw_dump.boot_memory_size;
336 	size += sizeof(struct fadump_crash_info_header);
337 	size += sizeof(struct elfhdr); /* ELF core header.*/
338 	size += sizeof(struct elf_phdr); /* place holder for cpu notes */
339 	/* Program headers for crash memory regions. */
340 	size += sizeof(struct elf_phdr) * (memblock_num_regions(memory) + 2);
341 
342 	size = PAGE_ALIGN(size);
343 	return size;
344 }
345 
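/*
 * Early-boot memory reservation for fadump. If a previous kernel's dump is
 * active, reserve everything above the boot memory area so the dump data is
 * preserved until userspace saves it; otherwise find and reserve a suitably
 * sized region (searching bottom-up, above the boot memory area) for a
 * future registration. Returns 1 if memory was reserved, 0 otherwise.
 */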
346 int __init fadump_reserve_mem(void)
347 {
348 	unsigned long base, size, memory_boundary;
349 
350 	if (!fw_dump.fadump_enabled)
351 		return 0;
352 
353 	if (!fw_dump.fadump_supported) {
354 		printk(KERN_INFO "Firmware-assisted dump is not supported on"
355 				" this hardware\n");
356 		fw_dump.fadump_enabled = 0;
357 		return 0;
358 	}
359 	/*
360 	 * Initialize boot memory size.
361 	 * If dump is active then we have already calculated the size during
362 	 * the first kernel boot.
363 	 */
364 	if (fdm_active)
365 		fw_dump.boot_memory_size = be64_to_cpu(fdm_active->rmr_region.source_len);
366 	else
367 		fw_dump.boot_memory_size = fadump_calculate_reserve_size();
368 
369 	/*
370 	 * Calculate the memory boundary.
371 	 * If memory_limit is less than actual memory boundary then reserve
372 	 * the memory for fadump beyond the memory_limit and adjust the
373 	 * memory_limit accordingly, so that the running kernel can run with
374 	 * specified memory_limit.
375 	 */
376 	if (memory_limit && memory_limit < memblock_end_of_DRAM()) {
377 		size = get_fadump_area_size();
378 		if ((memory_limit + size) < memblock_end_of_DRAM())
379 			memory_limit += size;
380 		else
381 			memory_limit = memblock_end_of_DRAM();
382 		printk(KERN_INFO "Adjusted memory_limit for firmware-assisted"
383 				" dump, now %#016llx\n", memory_limit);
384 	}
385 	if (memory_limit)
386 		memory_boundary = memory_limit;
387 	else
388 		memory_boundary = memblock_end_of_DRAM();
389 
390 	if (fw_dump.dump_active) {
391 		printk(KERN_INFO "Firmware-assisted dump is active.\n");
392 		/*
393 		 * If last boot has crashed then reserve all the memory
394 		 * above boot_memory_size so that we don't touch it until
395 		 * dump is written to disk by userspace tool. This memory
396 		 * will be released for general use once the dump is saved.
397 		 */
398 		base = fw_dump.boot_memory_size;
399 		size = memory_boundary - base;
400 		memblock_reserve(base, size);
401 		printk(KERN_INFO "Reserved %ldMB of memory at %ldMB "
402 				"for saving crash dump\n",
403 				(unsigned long)(size >> 20),
404 				(unsigned long)(base >> 20));
405 
406 		fw_dump.fadumphdr_addr =
407 				be64_to_cpu(fdm_active->rmr_region.destination_address) +
408 				be64_to_cpu(fdm_active->rmr_region.source_len);
409 		pr_debug("fadumphdr_addr = %p\n",
410 				(void *) fw_dump.fadumphdr_addr);
411 	} else {
412 		size = get_fadump_area_size();
413 
414 		/*
415 		 * Reserve memory at an offset closer to bottom of the RAM to
416 		 * minimize the impact of memory hot-remove operation. We can't
417 		 * use memblock_find_in_range() here since it doesn't allocate
418 		 * from bottom to top.
419 		 */
420 		for (base = fw_dump.boot_memory_size;
421 		     base <= (memory_boundary - size);
422 		     base += size) {
423 			if (memblock_is_region_memory(base, size) &&
424 			    !memblock_is_region_reserved(base, size))
425 				break;
426 		}
427 		if ((base > (memory_boundary - size)) ||
428 		    memblock_reserve(base, size)) {
429 			pr_err("Failed to reserve memory\n");
430 			return 0;
431 		}
432 
433 		pr_info("Reserved %ldMB of memory at %ldMB for firmware-"
434 			"assisted dump (System RAM: %ldMB)\n",
435 			(unsigned long)(size >> 20),
436 			(unsigned long)(base >> 20),
437 			(unsigned long)(memblock_phys_mem_size() >> 20));
438 	}
439 
440 	fw_dump.reserve_dump_area_start = base;
441 	fw_dump.reserve_dump_area_size = size;
442 	return 1;
443 }
444 
445 unsigned long __init arch_reserved_kernel_pages(void)
446 {
447 	return memblock_reserved_size() / PAGE_SIZE;
448 }
449 
450 /* Look for fadump= cmdline option. */
451 static int __init early_fadump_param(char *p)
452 {
453 	if (!p)
454 		return 1;
455 
456 	if (strncmp(p, "on", 2) == 0)
457 		fw_dump.fadump_enabled = 1;
458 	else if (strncmp(p, "off", 3) == 0)
459 		fw_dump.fadump_enabled = 0;
460 
461 	return 0;
462 }
463 early_param("fadump", early_fadump_param);
464 
465 /*
466  * Look for fadump_reserve_mem= cmdline option
467  * TODO: Remove references to 'fadump_reserve_mem=' parameter,
468  *       once the 'crashkernel=' parameter is widely adopted.
469  */
470 static int __init early_fadump_reserve_mem(char *p)
471 {
472 	if (p)
473 		fw_dump.reserve_bootvar = memparse(p, &p);
474 	return 0;
475 }
476 early_param("fadump_reserve_mem", early_fadump_reserve_mem);
477 
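/*
 * Register the dump sections with firmware through the
 * ibm,configure-kernel-dump RTAS call, retrying while RTAS reports a busy
 * delay, and translate the final RTAS status into an errno.
 */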
478 static int register_fw_dump(struct fadump_mem_struct *fdm)
479 {
480 	int rc, err;
481 	unsigned int wait_time;
482 
483 	pr_debug("Registering for firmware-assisted kernel dump...\n");
484 
485 	/* TODO: Add upper time limit for the delay */
486 	do {
487 		rc = rtas_call(fw_dump.ibm_configure_kernel_dump, 3, 1, NULL,
488 			FADUMP_REGISTER, fdm,
489 			sizeof(struct fadump_mem_struct));
490 
491 		wait_time = rtas_busy_delay_time(rc);
492 		if (wait_time)
493 			mdelay(wait_time);
494 
495 	} while (wait_time);
496 
497 	err = -EIO;
498 	switch (rc) {
499 	default:
500 		pr_err("Failed to register. Unknown Error(%d).\n", rc);
501 		break;
502 	case -1:
503 		printk(KERN_ERR "Failed to register firmware-assisted kernel"
504 			" dump. Hardware Error(%d).\n", rc);
505 		break;
506 	case -3:
507 		if (!is_boot_memory_area_contiguous())
508 			pr_err("Can't have holes in boot memory area while "
509 			       "registering fadump\n");
510 
511 		printk(KERN_ERR "Failed to register firmware-assisted kernel"
512 			" dump. Parameter Error(%d).\n", rc);
513 		err = -EINVAL;
514 		break;
515 	case -9:
516 		printk(KERN_ERR "firmware-assisted kernel dump is already "
517 			" registered.");
518 		fw_dump.dump_registered = 1;
519 		err = -EEXIST;
520 		break;
521 	case 0:
522 		printk(KERN_INFO "firmware-assisted kernel dump registration"
523 			" is successful\n");
524 		fw_dump.dump_registered = 1;
525 		err = 0;
526 		break;
527 	}
528 	return err;
529 }
530 
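/*
 * Crash-time entry point. The first CPU to get here records the crashing
 * CPU, vmcoreinfo and its registers in the fadump crash info header and
 * invokes ibm,os-term so that firmware takes the dump; any other CPU just
 * spins for as long as the dump stays registered.
 */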
531 void crash_fadump(struct pt_regs *regs, const char *str)
532 {
533 	struct fadump_crash_info_header *fdh = NULL;
534 	int old_cpu, this_cpu;
535 
536 	if (!should_fadump_crash())
537 		return;
538 
539 	/*
540 	 * old_cpu == -1 means this is the first CPU which has come here,
541 	 * go ahead and trigger fadump.
542 	 *
543 	 * old_cpu != -1 means some other CPU is already on its way
544 	 * to trigger fadump, so just keep looping here.
545 	 */
546 	this_cpu = smp_processor_id();
547 	old_cpu = cmpxchg(&crashing_cpu, -1, this_cpu);
548 
549 	if (old_cpu != -1) {
550 		/*
551 		 * We can't loop here indefinitely. Wait as long as fadump
552 		 * is in force. If we race with fadump un-registration this
553 		 * loop will break and then we go down to normal panic path
554 		 * and reboot. If fadump is in force the first crashing
555 		 * cpu will definitely trigger fadump.
556 		 */
557 		while (fw_dump.dump_registered)
558 			cpu_relax();
559 		return;
560 	}
561 
562 	fdh = __va(fw_dump.fadumphdr_addr);
563 	fdh->crashing_cpu = crashing_cpu;
564 	crash_save_vmcoreinfo();
565 
566 	if (regs)
567 		fdh->regs = *regs;
568 	else
569 		ppc_save_regs(&fdh->regs);
570 
571 	fdh->online_mask = *cpu_online_mask;
572 
573 	/* Call ibm,os-term rtas call to trigger firmware assisted dump */
574 	rtas_os_term((char *)str);
575 }
576 
577 #define GPR_MASK	0xffffff0000000000
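/*
 * Decode an ASCII register identifier of the form "GPRnn" into the GPR
 * number, e.g. "GPR05" yields 5. Returns -1 if the id is not a GPR or the
 * number is out of range.
 */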
578 static inline int fadump_gpr_index(u64 id)
579 {
580 	int i = -1;
581 	char str[3];
582 
583 	if ((id & GPR_MASK) == REG_ID("GPR")) {
584 		/* get the digits at the end */
585 		id &= ~GPR_MASK;
586 		id >>= 24;
587 		str[2] = '\0';
588 		str[1] = id & 0xff;
589 		str[0] = (id >> 8) & 0xff;
590 		sscanf(str, "%d", &i);
591 		if (i > 31)
592 			i = -1;
593 	}
594 	return i;
595 }
596 
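/*
 * Apply a single identifier/value pair from the register save area to the
 * corresponding field of pt_regs (GPRs, NIA, MSR, CTR, LR, XER, CR, DAR,
 * DSISR). Unknown identifiers are silently ignored.
 */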
597 static inline void fadump_set_regval(struct pt_regs *regs, u64 reg_id,
598 								u64 reg_val)
599 {
600 	int i;
601 
602 	i = fadump_gpr_index(reg_id);
603 	if (i >= 0)
604 		regs->gpr[i] = (unsigned long)reg_val;
605 	else if (reg_id == REG_ID("NIA"))
606 		regs->nip = (unsigned long)reg_val;
607 	else if (reg_id == REG_ID("MSR"))
608 		regs->msr = (unsigned long)reg_val;
609 	else if (reg_id == REG_ID("CTR"))
610 		regs->ctr = (unsigned long)reg_val;
611 	else if (reg_id == REG_ID("LR"))
612 		regs->link = (unsigned long)reg_val;
613 	else if (reg_id == REG_ID("XER"))
614 		regs->xer = (unsigned long)reg_val;
615 	else if (reg_id == REG_ID("CR"))
616 		regs->ccr = (unsigned long)reg_val;
617 	else if (reg_id == REG_ID("DAR"))
618 		regs->dar = (unsigned long)reg_val;
619 	else if (reg_id == REG_ID("DSISR"))
620 		regs->dsisr = (unsigned long)reg_val;
621 }
622 
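/*
 * Read one CPU's register entries, applying each of them to 'regs' until
 * the "CPUEND" marker is seen, and return a pointer to the entry that
 * follows the marker.
 */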
623 static struct fadump_reg_entry*
624 fadump_read_registers(struct fadump_reg_entry *reg_entry, struct pt_regs *regs)
625 {
626 	memset(regs, 0, sizeof(struct pt_regs));
627 
628 	while (be64_to_cpu(reg_entry->reg_id) != REG_ID("CPUEND")) {
629 		fadump_set_regval(regs, be64_to_cpu(reg_entry->reg_id),
630 					be64_to_cpu(reg_entry->reg_value));
631 		reg_entry++;
632 	}
633 	reg_entry++;
634 	return reg_entry;
635 }
636 
637 static u32 *fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs)
638 {
639 	struct elf_prstatus prstatus;
640 
641 	memset(&prstatus, 0, sizeof(prstatus));
642 	/*
643 	 * FIXME: How do I get the PID? Do I really need it?
644 	 * prstatus.pr_pid = ????
645 	 */
646 	elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
647 	buf = append_elf_note(buf, CRASH_CORE_NOTE_NAME, NT_PRSTATUS,
648 			      &prstatus, sizeof(prstatus));
649 	return buf;
650 }
651 
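/*
 * Patch the first PT_NOTE program header of the previously prepared ELF
 * core header so that it points at the freshly built CPU notes buffer
 * (physical address and size).
 */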
652 static void fadump_update_elfcore_header(char *bufp)
653 {
654 	struct elfhdr *elf;
655 	struct elf_phdr *phdr;
656 
657 	elf = (struct elfhdr *)bufp;
658 	bufp += sizeof(struct elfhdr);
659 
660 	/* First note is a place holder for cpu notes info. */
661 	phdr = (struct elf_phdr *)bufp;
662 
663 	if (phdr->p_type == PT_NOTE) {
664 		phdr->p_paddr = fw_dump.cpu_notes_buf;
665 		phdr->p_offset	= phdr->p_paddr;
666 		phdr->p_filesz	= fw_dump.cpu_notes_buf_size;
667 		phdr->p_memsz = fw_dump.cpu_notes_buf_size;
668 	}
669 	return;
670 }
671 
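/*
 * Allocate a zeroed buffer of whole pages for the per-CPU crash notes and
 * mark each page as reserved. Returns NULL if the allocation fails.
 */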
672 static void *fadump_cpu_notes_buf_alloc(unsigned long size)
673 {
674 	void *vaddr;
675 	struct page *page;
676 	unsigned long order, count, i;
677 
678 	order = get_order(size);
679 	vaddr = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
680 	if (!vaddr)
681 		return NULL;
682 
683 	count = 1 << order;
684 	page = virt_to_page(vaddr);
685 	for (i = 0; i < count; i++)
686 		SetPageReserved(page + i);
687 	return vaddr;
688 }
689 
690 static void fadump_cpu_notes_buf_free(unsigned long vaddr, unsigned long size)
691 {
692 	struct page *page;
693 	unsigned long order, count, i;
694 
695 	order = get_order(size);
696 	count = 1 << order;
697 	page = virt_to_page(vaddr);
698 	for (i = 0; i < count; i++)
699 		ClearPageReserved(page + i);
700 	__free_pages(page, order);
701 }
702 
703 /*
704  * Read CPU state dump data and convert it into ELF notes.
705  * The CPU dump starts with magic number "REGSAVE". NumCpusOffset should be
706  * used to access the data to allow for additional fields to be added without
707  * affecting compatibility. Each list of registers for a CPU starts with
708  * "CPUSTRT" and ends with "CPUEND". Each register entry is of 16 bytes,
709  * 8 Byte ASCII identifier and 8 Byte register value. The register entry
710  * with identifier "CPUSTRT" and "CPUEND" contains 4 byte cpu id as part
711  * of register value. For more details refer to PAPR document.
712  *
713  * Only for the crashing cpu do we ignore the CPU dump data and instead take
714  * the exact state from the fadump crash info structure populated by the
715  * first kernel at the time of crash.
716  */
717 static int __init fadump_build_cpu_notes(const struct fadump_mem_struct *fdm)
718 {
719 	struct fadump_reg_save_area_header *reg_header;
720 	struct fadump_reg_entry *reg_entry;
721 	struct fadump_crash_info_header *fdh = NULL;
722 	void *vaddr;
723 	unsigned long addr;
724 	u32 num_cpus, *note_buf;
725 	struct pt_regs regs;
726 	int i, rc = 0, cpu = 0;
727 
728 	if (!fdm->cpu_state_data.bytes_dumped)
729 		return -EINVAL;
730 
731 	addr = be64_to_cpu(fdm->cpu_state_data.destination_address);
732 	vaddr = __va(addr);
733 
734 	reg_header = vaddr;
735 	if (be64_to_cpu(reg_header->magic_number) != REGSAVE_AREA_MAGIC) {
736 		printk(KERN_ERR "Unable to read register save area.\n");
737 		return -ENOENT;
738 	}
739 	pr_debug("--------CPU State Data------------\n");
740 	pr_debug("Magic Number: %llx\n", be64_to_cpu(reg_header->magic_number));
741 	pr_debug("NumCpuOffset: %x\n", be32_to_cpu(reg_header->num_cpu_offset));
742 
743 	vaddr += be32_to_cpu(reg_header->num_cpu_offset);
744 	num_cpus = be32_to_cpu(*((__be32 *)(vaddr)));
745 	pr_debug("NumCpus     : %u\n", num_cpus);
746 	vaddr += sizeof(u32);
747 	reg_entry = (struct fadump_reg_entry *)vaddr;
748 
749 	/* Allocate buffer to hold cpu crash notes. */
750 	fw_dump.cpu_notes_buf_size = num_cpus * sizeof(note_buf_t);
751 	fw_dump.cpu_notes_buf_size = PAGE_ALIGN(fw_dump.cpu_notes_buf_size);
752 	note_buf = fadump_cpu_notes_buf_alloc(fw_dump.cpu_notes_buf_size);
753 	if (!note_buf) {
754 		printk(KERN_ERR "Failed to allocate 0x%lx bytes for "
755 			"cpu notes buffer\n", fw_dump.cpu_notes_buf_size);
756 		return -ENOMEM;
757 	}
758 	fw_dump.cpu_notes_buf = __pa(note_buf);
759 
760 	pr_debug("Allocated buffer for cpu notes of size %ld at %p\n",
761 			(num_cpus * sizeof(note_buf_t)), note_buf);
762 
763 	if (fw_dump.fadumphdr_addr)
764 		fdh = __va(fw_dump.fadumphdr_addr);
765 
766 	for (i = 0; i < num_cpus; i++) {
767 		if (be64_to_cpu(reg_entry->reg_id) != REG_ID("CPUSTRT")) {
768 			printk(KERN_ERR "Unable to read CPU state data\n");
769 			rc = -ENOENT;
770 			goto error_out;
771 		}
772 		/* Lower 4 bytes of reg_value contain the logical cpu id */
773 		cpu = be64_to_cpu(reg_entry->reg_value) & FADUMP_CPU_ID_MASK;
774 		if (fdh && !cpumask_test_cpu(cpu, &fdh->online_mask)) {
775 			SKIP_TO_NEXT_CPU(reg_entry);
776 			continue;
777 		}
778 		pr_debug("Reading register data for cpu %d...\n", cpu);
779 		if (fdh && fdh->crashing_cpu == cpu) {
780 			regs = fdh->regs;
781 			note_buf = fadump_regs_to_elf_notes(note_buf, &regs);
782 			SKIP_TO_NEXT_CPU(reg_entry);
783 		} else {
784 			reg_entry++;
785 			reg_entry = fadump_read_registers(reg_entry, &regs);
786 			note_buf = fadump_regs_to_elf_notes(note_buf, &regs);
787 		}
788 	}
789 	final_note(note_buf);
790 
791 	if (fdh) {
792 		pr_debug("Updating elfcore header (%llx) with cpu notes\n",
793 							fdh->elfcorehdr_addr);
794 		fadump_update_elfcore_header((char *)__va(fdh->elfcorehdr_addr));
795 	}
796 	return 0;
797 
798 error_out:
799 	fadump_cpu_notes_buf_free((unsigned long)__va(fw_dump.cpu_notes_buf),
800 					fw_dump.cpu_notes_buf_size);
801 	fw_dump.cpu_notes_buf = 0;
802 	fw_dump.cpu_notes_buf_size = 0;
803 	return rc;
804 
805 }
806 
807 /*
808  * Validate and process the dump data stored by firmware before exporting
809  * it through '/proc/vmcore'.
810  */
811 static int __init process_fadump(const struct fadump_mem_struct *fdm_active)
812 {
813 	struct fadump_crash_info_header *fdh;
814 	int rc = 0;
815 
816 	if (!fdm_active || !fw_dump.fadumphdr_addr)
817 		return -EINVAL;
818 
819 	/* Check if the dump data is valid. */
820 	if ((be16_to_cpu(fdm_active->header.dump_status_flag) == FADUMP_ERROR_FLAG) ||
821 			(fdm_active->cpu_state_data.error_flags != 0) ||
822 			(fdm_active->rmr_region.error_flags != 0)) {
823 		printk(KERN_ERR "Dump taken by platform is not valid\n");
824 		return -EINVAL;
825 	}
826 	if ((fdm_active->rmr_region.bytes_dumped !=
827 			fdm_active->rmr_region.source_len) ||
828 			!fdm_active->cpu_state_data.bytes_dumped) {
829 		printk(KERN_ERR "Dump taken by platform is incomplete\n");
830 		return -EINVAL;
831 	}
832 
833 	/* Validate the fadump crash info header */
834 	fdh = __va(fw_dump.fadumphdr_addr);
835 	if (fdh->magic_number != FADUMP_CRASH_INFO_MAGIC) {
836 		printk(KERN_ERR "Crash info header is not valid.\n");
837 		return -EINVAL;
838 	}
839 
840 	rc = fadump_build_cpu_notes(fdm_active);
841 	if (rc)
842 		return rc;
843 
844 	/*
845 	 * We are done validating the dump info and the elfcore header is now
846 	 * ready to be exported. Set elfcorehdr_addr so that the vmcore module
847 	 * will export the elfcore header through '/proc/vmcore'.
848 	 */
849 	elfcorehdr_addr = fdh->elfcorehdr_addr;
850 
851 	return 0;
852 }
853 
854 static void free_crash_memory_ranges(void)
855 {
856 	kfree(crash_memory_ranges);
857 	crash_memory_ranges = NULL;
858 	crash_memory_ranges_size = 0;
859 	max_crash_mem_ranges = 0;
860 }
861 
862 /*
863  * Allocate or reallocate the crash memory ranges array in increments
864  * of PAGE_SIZE.
865  */
866 static int allocate_crash_memory_ranges(void)
867 {
868 	struct fad_crash_memory_ranges *new_array;
869 	u64 new_size;
870 
871 	new_size = crash_memory_ranges_size + PAGE_SIZE;
872 	pr_debug("Allocating %llu bytes of memory for crash memory ranges\n",
873 		 new_size);
874 
875 	new_array = krealloc(crash_memory_ranges, new_size, GFP_KERNEL);
876 	if (new_array == NULL) {
877 		pr_err("Insufficient memory for setting up crash memory ranges\n");
878 		free_crash_memory_ranges();
879 		return -ENOMEM;
880 	}
881 
882 	crash_memory_ranges = new_array;
883 	crash_memory_ranges_size = new_size;
884 	max_crash_mem_ranges = (new_size /
885 				sizeof(struct fad_crash_memory_ranges));
886 	return 0;
887 }
888 
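/*
 * Record the range [base, end) in the crash memory ranges array, growing
 * the array by another page when it is full. Empty ranges are ignored.
 */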
889 static inline int fadump_add_crash_memory(unsigned long long base,
890 					  unsigned long long end)
891 {
892 	if (base == end)
893 		return 0;
894 
895 	if (crash_mem_ranges == max_crash_mem_ranges) {
896 		int ret;
897 
898 		ret = allocate_crash_memory_ranges();
899 		if (ret)
900 			return ret;
901 	}
902 
903 	pr_debug("crash_memory_range[%d] [%#016llx-%#016llx], %#llx bytes\n",
904 		crash_mem_ranges, base, end - 1, (end - base));
905 	crash_memory_ranges[crash_mem_ranges].base = base;
906 	crash_memory_ranges[crash_mem_ranges].size = end - base;
907 	crash_mem_ranges++;
908 	return 0;
909 }
910 
911 static int fadump_exclude_reserved_area(unsigned long long start,
912 					unsigned long long end)
913 {
914 	unsigned long long ra_start, ra_end;
915 	int ret = 0;
916 
917 	ra_start = fw_dump.reserve_dump_area_start;
918 	ra_end = ra_start + fw_dump.reserve_dump_area_size;
919 
920 	if ((ra_start < end) && (ra_end > start)) {
921 		if ((start < ra_start) && (end > ra_end)) {
922 			ret = fadump_add_crash_memory(start, ra_start);
923 			if (ret)
924 				return ret;
925 
926 			ret = fadump_add_crash_memory(ra_end, end);
927 		} else if (start < ra_start) {
928 			ret = fadump_add_crash_memory(start, ra_start);
929 		} else if (ra_end < end) {
930 			ret = fadump_add_crash_memory(ra_end, end);
931 		}
932 	} else
933 		ret = fadump_add_crash_memory(start, end);
934 
935 	return ret;
936 }
937 
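/*
 * Fill in the fixed fields of the ELF core header (identification, type
 * ET_CORE, machine and program header table location). The program headers
 * themselves are added later by fadump_create_elfcore_headers().
 */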
938 static int fadump_init_elfcore_header(char *bufp)
939 {
940 	struct elfhdr *elf;
941 
942 	elf = (struct elfhdr *) bufp;
943 	bufp += sizeof(struct elfhdr);
944 	memcpy(elf->e_ident, ELFMAG, SELFMAG);
945 	elf->e_ident[EI_CLASS] = ELF_CLASS;
946 	elf->e_ident[EI_DATA] = ELF_DATA;
947 	elf->e_ident[EI_VERSION] = EV_CURRENT;
948 	elf->e_ident[EI_OSABI] = ELF_OSABI;
949 	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
950 	elf->e_type = ET_CORE;
951 	elf->e_machine = ELF_ARCH;
952 	elf->e_version = EV_CURRENT;
953 	elf->e_entry = 0;
954 	elf->e_phoff = sizeof(struct elfhdr);
955 	elf->e_shoff = 0;
956 #if defined(_CALL_ELF)
957 	elf->e_flags = _CALL_ELF;
958 #else
959 	elf->e_flags = 0;
960 #endif
961 	elf->e_ehsize = sizeof(struct elfhdr);
962 	elf->e_phentsize = sizeof(struct elf_phdr);
963 	elf->e_phnum = 0;
964 	elf->e_shentsize = 0;
965 	elf->e_shnum = 0;
966 	elf->e_shstrndx = 0;
967 
968 	return 0;
969 }
970 
971 /*
972  * Traverse the memblock structure and set up crash memory ranges. These
973  * ranges will be used to create PT_LOAD program headers in the elfcore header.
974  */
975 static int fadump_setup_crash_memory_ranges(void)
976 {
977 	struct memblock_region *reg;
978 	unsigned long long start, end;
979 	int ret;
980 
981 	pr_debug("Setup crash memory ranges.\n");
982 	crash_mem_ranges = 0;
983 	/*
984 	 * Add the first memory chunk (RMA_START through boot_memory_size) as
985 	 * a separate memory chunk. The reason is that, at the time of crash,
986 	 * firmware will move the content of this chunk to a different location
987 	 * specified during fadump registration. We need to create a separate
988 	 * program header for this chunk with the correct offset.
989 	 */
990 	ret = fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);
991 	if (ret)
992 		return ret;
993 
994 	for_each_memblock(memory, reg) {
995 		start = (unsigned long long)reg->base;
996 		end = start + (unsigned long long)reg->size;
997 
998 		/*
999 		 * skip the first memory chunk that is already added (RMA_START
1000 		 * through boot_memory_size). This logic needs a relook if and
1001 		 * when RMA_START changes to a non-zero value.
1002 		 */
1003 		BUILD_BUG_ON(RMA_START != 0);
1004 		if (start < fw_dump.boot_memory_size) {
1005 			if (end > fw_dump.boot_memory_size)
1006 				start = fw_dump.boot_memory_size;
1007 			else
1008 				continue;
1009 		}
1010 
1011 		/* add this range excluding the reserved dump area. */
1012 		ret = fadump_exclude_reserved_area(start, end);
1013 		if (ret)
1014 			return ret;
1015 	}
1016 
1017 	return 0;
1018 }
1019 
1020 /*
1021  * If the given physical address falls within the boot memory region then
1022  * return the relocated address that points to the dump region reserved
1023  * for saving initial boot memory contents.
1024  */
1025 static inline unsigned long fadump_relocate(unsigned long paddr)
1026 {
1027 	if (paddr > RMA_START && paddr < fw_dump.boot_memory_size)
1028 		return be64_to_cpu(fdm.rmr_region.destination_address) + paddr;
1029 	else
1030 		return paddr;
1031 }
1032 
1033 static int fadump_create_elfcore_headers(char *bufp)
1034 {
1035 	struct elfhdr *elf;
1036 	struct elf_phdr *phdr;
1037 	int i;
1038 
1039 	fadump_init_elfcore_header(bufp);
1040 	elf = (struct elfhdr *)bufp;
1041 	bufp += sizeof(struct elfhdr);
1042 
1043 	/*
1044 	 * setup ELF PT_NOTE, place holder for cpu notes info. The notes info
1045 	 * will be populated during second kernel boot after crash. Hence
1046 	 * this PT_NOTE will always be the first elf note.
1047 	 *
1048 	 * NOTE: Any new ELF note addition should be placed after this note.
1049 	 */
1050 	phdr = (struct elf_phdr *)bufp;
1051 	bufp += sizeof(struct elf_phdr);
1052 	phdr->p_type = PT_NOTE;
1053 	phdr->p_flags = 0;
1054 	phdr->p_vaddr = 0;
1055 	phdr->p_align = 0;
1056 
1057 	phdr->p_offset = 0;
1058 	phdr->p_paddr = 0;
1059 	phdr->p_filesz = 0;
1060 	phdr->p_memsz = 0;
1061 
1062 	(elf->e_phnum)++;
1063 
1064 	/* setup ELF PT_NOTE for vmcoreinfo */
1065 	phdr = (struct elf_phdr *)bufp;
1066 	bufp += sizeof(struct elf_phdr);
1067 	phdr->p_type	= PT_NOTE;
1068 	phdr->p_flags	= 0;
1069 	phdr->p_vaddr	= 0;
1070 	phdr->p_align	= 0;
1071 
1072 	phdr->p_paddr	= fadump_relocate(paddr_vmcoreinfo_note());
1073 	phdr->p_offset	= phdr->p_paddr;
1074 	phdr->p_memsz	= phdr->p_filesz = VMCOREINFO_NOTE_SIZE;
1075 
1076 	/* Increment number of program headers. */
1077 	(elf->e_phnum)++;
1078 
1079 	/* setup PT_LOAD sections. */
1080 
1081 	for (i = 0; i < crash_mem_ranges; i++) {
1082 		unsigned long long mbase, msize;
1083 		mbase = crash_memory_ranges[i].base;
1084 		msize = crash_memory_ranges[i].size;
1085 
1086 		if (!msize)
1087 			continue;
1088 
1089 		phdr = (struct elf_phdr *)bufp;
1090 		bufp += sizeof(struct elf_phdr);
1091 		phdr->p_type	= PT_LOAD;
1092 		phdr->p_flags	= PF_R|PF_W|PF_X;
1093 		phdr->p_offset	= mbase;
1094 
1095 		if (mbase == RMA_START) {
1096 			/*
1097 			 * The entire RMA region will be moved by firmware
1098 			 * to the specified destination_address. Hence set
1099 			 * the correct offset.
1100 			 */
1101 			phdr->p_offset = be64_to_cpu(fdm.rmr_region.destination_address);
1102 		}
1103 
1104 		phdr->p_paddr = mbase;
1105 		phdr->p_vaddr = (unsigned long)__va(mbase);
1106 		phdr->p_filesz = msize;
1107 		phdr->p_memsz = msize;
1108 		phdr->p_align = 0;
1109 
1110 		/* Increment number of program headers. */
1111 		(elf->e_phnum)++;
1112 	}
1113 	return 0;
1114 }
1115 
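/*
 * Initialize the fadump crash info header at 'addr' (magic number,
 * elfcorehdr address, crashing CPU set to unknown) and return the address
 * immediately after it, where the ELF core header will be built.
 */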
1116 static unsigned long init_fadump_header(unsigned long addr)
1117 {
1118 	struct fadump_crash_info_header *fdh;
1119 
1120 	if (!addr)
1121 		return 0;
1122 
1123 	fw_dump.fadumphdr_addr = addr;
1124 	fdh = __va(addr);
1125 	addr += sizeof(struct fadump_crash_info_header);
1126 
1127 	memset(fdh, 0, sizeof(struct fadump_crash_info_header));
1128 	fdh->magic_number = FADUMP_CRASH_INFO_MAGIC;
1129 	fdh->elfcorehdr_addr = addr;
1130 	/* We will set the crashing cpu id in crash_fadump() during crash. */
1131 	fdh->crashing_cpu = CPU_UNKNOWN;
1132 
1133 	return addr;
1134 }
1135 
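/*
 * Prepare everything needed for registration: set up the crash memory
 * ranges, place the crash info header and the ELF core headers right after
 * the relocated boot memory region, and then register the dump sections
 * with firmware.
 */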
1136 static int register_fadump(void)
1137 {
1138 	unsigned long addr;
1139 	void *vaddr;
1140 	int ret;
1141 
1142 	/*
1143 	 * If no memory is reserved then we cannot register for firmware-
1144 	 * assisted dump.
1145 	 */
1146 	if (!fw_dump.reserve_dump_area_size)
1147 		return -ENODEV;
1148 
1149 	ret = fadump_setup_crash_memory_ranges();
1150 	if (ret)
1151 		return ret;
1152 
1153 	addr = be64_to_cpu(fdm.rmr_region.destination_address) + be64_to_cpu(fdm.rmr_region.source_len);
1154 	/* Initialize fadump crash info header. */
1155 	addr = init_fadump_header(addr);
1156 	vaddr = __va(addr);
1157 
1158 	pr_debug("Creating ELF core headers at %#016lx\n", addr);
1159 	fadump_create_elfcore_headers(vaddr);
1160 
1161 	/* register the future kernel dump with firmware. */
1162 	return register_fw_dump(&fdm);
1163 }
1164 
1165 static int fadump_unregister_dump(struct fadump_mem_struct *fdm)
1166 {
1167 	int rc = 0;
1168 	unsigned int wait_time;
1169 
1170 	pr_debug("Un-register firmware-assisted dump\n");
1171 
1172 	/* TODO: Add upper time limit for the delay */
1173 	do {
1174 		rc = rtas_call(fw_dump.ibm_configure_kernel_dump, 3, 1, NULL,
1175 			FADUMP_UNREGISTER, fdm,
1176 			sizeof(struct fadump_mem_struct));
1177 
1178 		wait_time = rtas_busy_delay_time(rc);
1179 		if (wait_time)
1180 			mdelay(wait_time);
1181 	} while (wait_time);
1182 
1183 	if (rc) {
1184 		printk(KERN_ERR "Failed to un-register firmware-assisted dump."
1185 			" unexpected error(%d).\n", rc);
1186 		return rc;
1187 	}
1188 	fw_dump.dump_registered = 0;
1189 	return 0;
1190 }
1191 
1192 static int fadump_invalidate_dump(struct fadump_mem_struct *fdm)
1193 {
1194 	int rc = 0;
1195 	unsigned int wait_time;
1196 
1197 	pr_debug("Invalidating firmware-assisted dump registration\n");
1198 
1199 	/* TODO: Add upper time limit for the delay */
1200 	do {
1201 		rc = rtas_call(fw_dump.ibm_configure_kernel_dump, 3, 1, NULL,
1202 			FADUMP_INVALIDATE, fdm,
1203 			sizeof(struct fadump_mem_struct));
1204 
1205 		wait_time = rtas_busy_delay_time(rc);
1206 		if (wait_time)
1207 			mdelay(wait_time);
1208 	} while (wait_time);
1209 
1210 	if (rc) {
1211 		pr_err("Failed to invalidate firmware-assisted dump registration. Unexpected error (%d).\n", rc);
1212 		return rc;
1213 	}
1214 	fw_dump.dump_active = 0;
1215 	fdm_active = NULL;
1216 	return 0;
1217 }
1218 
1219 void fadump_cleanup(void)
1220 {
1221 	/* Invalidate the registration only if dump is active. */
1222 	if (fw_dump.dump_active) {
1223 		init_fadump_mem_struct(&fdm,
1224 			be64_to_cpu(fdm_active->cpu_state_data.destination_address));
1225 		fadump_invalidate_dump(&fdm);
1226 	} else if (fw_dump.dump_registered) {
1227 		/* Un-register Firmware-assisted dump if it was registered. */
1228 		fadump_unregister_dump(&fdm);
1229 		free_crash_memory_ranges();
1230 	}
1231 }
1232 
1233 static void fadump_free_reserved_memory(unsigned long start_pfn,
1234 					unsigned long end_pfn)
1235 {
1236 	unsigned long pfn;
1237 	unsigned long time_limit = jiffies + HZ;
1238 
1239 	pr_info("freeing reserved memory (0x%llx - 0x%llx)\n",
1240 		PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
1241 
1242 	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1243 		free_reserved_page(pfn_to_page(pfn));
1244 
1245 		if (time_after(jiffies, time_limit)) {
1246 			cond_resched();
1247 			time_limit = jiffies + HZ;
1248 		}
1249 	}
1250 }
1251 
1252 /*
1253  * Skip memory holes and free memory that was actually reserved.
1254  */
1255 static void fadump_release_reserved_area(unsigned long start, unsigned long end)
1256 {
1257 	struct memblock_region *reg;
1258 	unsigned long tstart, tend;
1259 	unsigned long start_pfn = PHYS_PFN(start);
1260 	unsigned long end_pfn = PHYS_PFN(end);
1261 
1262 	for_each_memblock(memory, reg) {
1263 		tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
1264 		tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
1265 		if (tstart < tend) {
1266 			fadump_free_reserved_memory(tstart, tend);
1267 
1268 			if (tend == end_pfn)
1269 				break;
1270 
1271 			start_pfn = tend + 1;
1272 		}
1273 	}
1274 }
1275 
1276 /*
1277  * Release the memory that was reserved in early boot to preserve the memory
1278  * contents. The released memory will be available for general use.
1279  */
1280 static void fadump_release_memory(unsigned long begin, unsigned long end)
1281 {
1282 	unsigned long ra_start, ra_end;
1283 
1284 	ra_start = fw_dump.reserve_dump_area_start;
1285 	ra_end = ra_start + fw_dump.reserve_dump_area_size;
1286 
1287 	/*
1288 	 * Exclude the dump reserve area; it will be reused for the next
1289 	 * fadump registration.
1290 	 */
1291 	if (begin < ra_end && end > ra_start) {
1292 		if (begin < ra_start)
1293 			fadump_release_reserved_area(begin, ra_start);
1294 		if (end > ra_end)
1295 			fadump_release_reserved_area(ra_end, end);
1296 	} else
1297 		fadump_release_reserved_area(begin, end);
1298 }
1299 
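/*
 * Once the dump has been saved, invalidate the registration with firmware,
 * release the reserved memory back for general use (keeping just enough
 * for a re-registration), free the CPU notes buffer and re-initialize the
 * fadump memory structure for the next registration.
 */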
1300 static void fadump_invalidate_release_mem(void)
1301 {
1302 	unsigned long reserved_area_start, reserved_area_end;
1303 	unsigned long destination_address;
1304 
1305 	mutex_lock(&fadump_mutex);
1306 	if (!fw_dump.dump_active) {
1307 		mutex_unlock(&fadump_mutex);
1308 		return;
1309 	}
1310 
1311 	destination_address = be64_to_cpu(fdm_active->cpu_state_data.destination_address);
1312 	fadump_cleanup();
1313 	mutex_unlock(&fadump_mutex);
1314 
1315 	/*
1316 	 * Save the current reserved memory bounds; we will require them
1317 	 * later when releasing the memory for general use.
1318 	 */
1319 	reserved_area_start = fw_dump.reserve_dump_area_start;
1320 	reserved_area_end = reserved_area_start +
1321 			fw_dump.reserve_dump_area_size;
1322 	/*
1323 	 * Setup reserve_dump_area_start and its size so that we can
1324 	 * reuse this reserved memory for re-registration.
1325 	 */
1326 	fw_dump.reserve_dump_area_start = destination_address;
1327 	fw_dump.reserve_dump_area_size = get_fadump_area_size();
1328 
1329 	fadump_release_memory(reserved_area_start, reserved_area_end);
1330 	if (fw_dump.cpu_notes_buf) {
1331 		fadump_cpu_notes_buf_free(
1332 				(unsigned long)__va(fw_dump.cpu_notes_buf),
1333 				fw_dump.cpu_notes_buf_size);
1334 		fw_dump.cpu_notes_buf = 0;
1335 		fw_dump.cpu_notes_buf_size = 0;
1336 	}
1337 	/* Initialize the kernel dump memory structure for FAD registration. */
1338 	init_fadump_mem_struct(&fdm, fw_dump.reserve_dump_area_start);
1339 }
1340 
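/*
 * Store handler for the 'fadump_release_mem' sysfs file (created under
 * kernel_kobj, i.e. /sys/kernel/): writing '1' while a dump is active
 * tears down '/proc/vmcore' and releases the memory holding the dump.
 */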
1341 static ssize_t fadump_release_memory_store(struct kobject *kobj,
1342 					struct kobj_attribute *attr,
1343 					const char *buf, size_t count)
1344 {
1345 	if (!fw_dump.dump_active)
1346 		return -EPERM;
1347 
1348 	if (buf[0] == '1') {
1349 		/*
1350 		 * Take away '/proc/vmcore'. We are releasing the dump
1351 		 * memory, hence it will no longer be valid.
1352 		 */
1353 #ifdef CONFIG_PROC_VMCORE
1354 		vmcore_cleanup();
1355 #endif
1356 		fadump_invalidate_release_mem();
1357 
1358 	} else
1359 		return -EINVAL;
1360 	return count;
1361 }
1362 
1363 static ssize_t fadump_enabled_show(struct kobject *kobj,
1364 					struct kobj_attribute *attr,
1365 					char *buf)
1366 {
1367 	return sprintf(buf, "%d\n", fw_dump.fadump_enabled);
1368 }
1369 
1370 static ssize_t fadump_register_show(struct kobject *kobj,
1371 					struct kobj_attribute *attr,
1372 					char *buf)
1373 {
1374 	return sprintf(buf, "%d\n", fw_dump.dump_registered);
1375 }
1376 
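/*
 * Store handler for the 'fadump_registered' sysfs file: writing '0'
 * unregisters and '1' registers the dump with firmware. Rejected while a
 * captured dump is still active (fdm_active).
 */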
1377 static ssize_t fadump_register_store(struct kobject *kobj,
1378 					struct kobj_attribute *attr,
1379 					const char *buf, size_t count)
1380 {
1381 	int ret = 0;
1382 
1383 	if (!fw_dump.fadump_enabled || fdm_active)
1384 		return -EPERM;
1385 
1386 	mutex_lock(&fadump_mutex);
1387 
1388 	switch (buf[0]) {
1389 	case '0':
1390 		if (fw_dump.dump_registered == 0) {
1391 			goto unlock_out;
1392 		}
1393 		/* Un-register Firmware-assisted dump */
1394 		fadump_unregister_dump(&fdm);
1395 		break;
1396 	case '1':
1397 		if (fw_dump.dump_registered == 1) {
1398 			ret = -EEXIST;
1399 			goto unlock_out;
1400 		}
1401 		/* Register Firmware-assisted dump */
1402 		ret = register_fadump();
1403 		break;
1404 	default:
1405 		ret = -EINVAL;
1406 		break;
1407 	}
1408 
1409 unlock_out:
1410 	mutex_unlock(&fadump_mutex);
1411 	return ret < 0 ? ret : count;
1412 }
1413 
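/*
 * debugfs 'fadump_region' show handler: print the destination range,
 * length and bytes dumped for the CPU state, HPTE and boot memory (DUMP)
 * sections, plus the remaining reserved region while a dump is active.
 */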
1414 static int fadump_region_show(struct seq_file *m, void *private)
1415 {
1416 	const struct fadump_mem_struct *fdm_ptr;
1417 
1418 	if (!fw_dump.fadump_enabled)
1419 		return 0;
1420 
1421 	mutex_lock(&fadump_mutex);
1422 	if (fdm_active)
1423 		fdm_ptr = fdm_active;
1424 	else {
1425 		mutex_unlock(&fadump_mutex);
1426 		fdm_ptr = &fdm;
1427 	}
1428 
1429 	seq_printf(m,
1430 			"CPU : [%#016llx-%#016llx] %#llx bytes, "
1431 			"Dumped: %#llx\n",
1432 			be64_to_cpu(fdm_ptr->cpu_state_data.destination_address),
1433 			be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) +
1434 			be64_to_cpu(fdm_ptr->cpu_state_data.source_len) - 1,
1435 			be64_to_cpu(fdm_ptr->cpu_state_data.source_len),
1436 			be64_to_cpu(fdm_ptr->cpu_state_data.bytes_dumped));
1437 	seq_printf(m,
1438 			"HPTE: [%#016llx-%#016llx] %#llx bytes, "
1439 			"Dumped: %#llx\n",
1440 			be64_to_cpu(fdm_ptr->hpte_region.destination_address),
1441 			be64_to_cpu(fdm_ptr->hpte_region.destination_address) +
1442 			be64_to_cpu(fdm_ptr->hpte_region.source_len) - 1,
1443 			be64_to_cpu(fdm_ptr->hpte_region.source_len),
1444 			be64_to_cpu(fdm_ptr->hpte_region.bytes_dumped));
1445 	seq_printf(m,
1446 			"DUMP: [%#016llx-%#016llx] %#llx bytes, "
1447 			"Dumped: %#llx\n",
1448 			be64_to_cpu(fdm_ptr->rmr_region.destination_address),
1449 			be64_to_cpu(fdm_ptr->rmr_region.destination_address) +
1450 			be64_to_cpu(fdm_ptr->rmr_region.source_len) - 1,
1451 			be64_to_cpu(fdm_ptr->rmr_region.source_len),
1452 			be64_to_cpu(fdm_ptr->rmr_region.bytes_dumped));
1453 
1454 	if (!fdm_active ||
1455 		(fw_dump.reserve_dump_area_start ==
1456 		be64_to_cpu(fdm_ptr->cpu_state_data.destination_address)))
1457 		goto out;
1458 
1459 	/* Dump is active. Show reserved memory region. */
1460 	seq_printf(m,
1461 			"    : [%#016llx-%#016llx] %#llx bytes, "
1462 			"Dumped: %#llx\n",
1463 			(unsigned long long)fw_dump.reserve_dump_area_start,
1464 			be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) - 1,
1465 			be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) -
1466 			fw_dump.reserve_dump_area_start,
1467 			be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) -
1468 			fw_dump.reserve_dump_area_start);
1469 out:
1470 	if (fdm_active)
1471 		mutex_unlock(&fadump_mutex);
1472 	return 0;
1473 }
1474 
1475 static struct kobj_attribute fadump_release_attr = __ATTR(fadump_release_mem,
1476 						0200, NULL,
1477 						fadump_release_memory_store);
1478 static struct kobj_attribute fadump_attr = __ATTR(fadump_enabled,
1479 						0444, fadump_enabled_show,
1480 						NULL);
1481 static struct kobj_attribute fadump_register_attr = __ATTR(fadump_registered,
1482 						0644, fadump_register_show,
1483 						fadump_register_store);
1484 
1485 static int fadump_region_open(struct inode *inode, struct file *file)
1486 {
1487 	return single_open(file, fadump_region_show, inode->i_private);
1488 }
1489 
1490 static const struct file_operations fadump_region_fops = {
1491 	.open    = fadump_region_open,
1492 	.read    = seq_read,
1493 	.llseek  = seq_lseek,
1494 	.release = single_release,
1495 };
1496 
1497 static void fadump_init_files(void)
1498 {
1499 	struct dentry *debugfs_file;
1500 	int rc = 0;
1501 
1502 	rc = sysfs_create_file(kernel_kobj, &fadump_attr.attr);
1503 	if (rc)
1504 		printk(KERN_ERR "fadump: unable to create sysfs file"
1505 			" fadump_enabled (%d)\n", rc);
1506 
1507 	rc = sysfs_create_file(kernel_kobj, &fadump_register_attr.attr);
1508 	if (rc)
1509 		printk(KERN_ERR "fadump: unable to create sysfs file"
1510 			" fadump_registered (%d)\n", rc);
1511 
1512 	debugfs_file = debugfs_create_file("fadump_region", 0444,
1513 					powerpc_debugfs_root, NULL,
1514 					&fadump_region_fops);
1515 	if (!debugfs_file)
1516 		printk(KERN_ERR "fadump: unable to create debugfs file"
1517 				" fadump_region\n");
1518 
1519 	if (fw_dump.dump_active) {
1520 		rc = sysfs_create_file(kernel_kobj, &fadump_release_attr.attr);
1521 		if (rc)
1522 			printk(KERN_ERR "fadump: unable to create sysfs file"
1523 				" fadump_release_mem (%d)\n", rc);
1524 	}
1525 	return;
1526 }
1527 
1528 /*
1529  * Prepare for firmware-assisted dump.
1530  */
1531 int __init setup_fadump(void)
1532 {
1533 	if (!fw_dump.fadump_enabled)
1534 		return 0;
1535 
1536 	if (!fw_dump.fadump_supported) {
1537 		printk(KERN_ERR "Firmware-assisted dump is not supported on"
1538 			" this hardware\n");
1539 		return 0;
1540 	}
1541 
1542 	fadump_show_config();
1543 	/*
1544 	 * If dump data is available then see if it is valid and prepare for
1545 	 * saving it to the disk.
1546 	 */
1547 	if (fw_dump.dump_active) {
1548 		/*
1549 		 * If dump processing fails then invalidate the registration
1550 		 * and release memory before proceeding with re-registration.
1551 		 */
1552 		if (process_fadump(fdm_active) < 0)
1553 			fadump_invalidate_release_mem();
1554 	}
1555 	/* Initialize the kernel dump memory structure for FAD registration. */
1556 	else if (fw_dump.reserve_dump_area_size)
1557 		init_fadump_mem_struct(&fdm, fw_dump.reserve_dump_area_start);
1558 	fadump_init_files();
1559 
1560 	return 1;
1561 }
1562 subsys_initcall(setup_fadump);
1563