// SPDX-License-Identifier: GPL-2.0-only
/*
 * Code to handle transition of Linux booting another kernel.
 *
 * Copyright (C) 2002-2003 Eric Biederman  <ebiederm@xmission.com>
 * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
 * Copyright (C) 2005 IBM Corporation.
 */

#include <linux/kexec.h>
#include <linux/reboot.h>
#include <linux/threads.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/irq.h>
#include <linux/ftrace.h>

#include <asm/kdump.h>
#include <asm/machdep.h>
#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/sections.h>

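/*
 * Quiesce all interrupt sources before handing over to the new kernel:
 * EOI anything still in progress, then mask and disable each line so the
 * kexec'd kernel starts with the interrupt controllers in a known state.
 */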
void machine_kexec_mask_interrupts(void)
{
	unsigned int i;
	struct irq_desc *desc;

	for_each_irq_desc(i, desc) {
		struct irq_chip *chip;

		chip = irq_desc_get_chip(desc);
		if (!chip)
			continue;

		if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data))
			chip->irq_eoi(&desc->irq_data);

		if (chip->irq_mask)
			chip->irq_mask(&desc->irq_data);

		if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
			chip->irq_disable(&desc->irq_data);
	}
}

void machine_crash_shutdown(struct pt_regs *regs)
{
	default_machine_crash_shutdown(regs);
}

/*
 * Do whatever setup is needed on the image and the reboot code buffer
 * so that we can avoid allocations later.
 */
int machine_kexec_prepare(struct kimage *image)
{
	if (ppc_md.machine_kexec_prepare)
		return ppc_md.machine_kexec_prepare(image);
	else
		return default_machine_kexec_prepare(image);
}

void machine_kexec_cleanup(struct kimage *image)
{
}

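/*
 * Export powerpc-specific symbols, structure sizes and offsets via the
 * vmcoreinfo note so that dump tools (e.g. makedumpfile or crash) can
 * interpret the memory image captured by the kdump kernel.
 */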
void arch_crash_save_vmcoreinfo(void)
{

#ifdef CONFIG_NEED_MULTIPLE_NODES
	VMCOREINFO_SYMBOL(node_data);
	VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
#ifndef CONFIG_NEED_MULTIPLE_NODES
	VMCOREINFO_SYMBOL(contig_page_data);
#endif
#if defined(CONFIG_PPC64) && defined(CONFIG_SPARSEMEM_VMEMMAP)
	VMCOREINFO_SYMBOL(vmemmap_list);
	VMCOREINFO_SYMBOL(mmu_vmemmap_psize);
	VMCOREINFO_SYMBOL(mmu_psize_defs);
	VMCOREINFO_STRUCT_SIZE(vmemmap_backing);
	VMCOREINFO_OFFSET(vmemmap_backing, list);
	VMCOREINFO_OFFSET(vmemmap_backing, phys);
	VMCOREINFO_OFFSET(vmemmap_backing, virt_addr);
	VMCOREINFO_STRUCT_SIZE(mmu_psize_def);
	VMCOREINFO_OFFSET(mmu_psize_def, shift);
#endif
	vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
}

/*
 * Do not allocate memory (or fail in any way) in machine_kexec().
 * We are past the point of no return, committed to rebooting now.
 */
void machine_kexec(struct kimage *image)
{
	int save_ftrace_enabled;

	save_ftrace_enabled = __ftrace_enabled_save();
	this_cpu_disable_ftrace();

	if (ppc_md.machine_kexec)
		ppc_md.machine_kexec(image);
	else
		default_machine_kexec(image);

	this_cpu_enable_ftrace();
	__ftrace_enabled_restore(save_ftrace_enabled);

	/* Fall back to normal restart if we're still alive. */
	machine_restart(NULL);
	for(;;);
}

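/*
 * Reserve memory for the kdump (crash) kernel. The size and optional base
 * come from the crashkernel= command-line option, parsed by the common
 * parse_crashkernel() helper (typically crashkernel=size[KMG][@offset[KMG]]).
 */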
void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base, total_mem_sz;
	int ret;

	total_mem_sz = memory_limit ? memory_limit : memblock_phys_mem_size();
	/* use common parsing */
	ret = parse_crashkernel(boot_command_line, total_mem_sz,
			&crash_size, &crash_base);
	if (ret == 0 && crash_size > 0) {
		crashk_res.start = crash_base;
		crashk_res.end = crash_base + crash_size - 1;
	}

	if (crashk_res.end == crashk_res.start) {
		crashk_res.start = crashk_res.end = 0;
		return;
	}

	/*
	 * We might have got these values via the command line or the
	 * device tree; either way, sanitise them now.
	 */
	crash_size = resource_size(&crashk_res);

#ifndef CONFIG_NONSTATIC_KERNEL
	if (crashk_res.start != KDUMP_KERNELBASE)
		printk("Crash kernel location must be 0x%x\n",
				KDUMP_KERNELBASE);

	crashk_res.start = KDUMP_KERNELBASE;
#else
	if (!crashk_res.start) {
#ifdef CONFIG_PPC64
		/*
		 * On LPAR platforms, place the crash kernel at half of the
		 * RMA size (512MB or more) so that it has enough room for
		 * itself and some stack within the first memory segment,
		 * while the normal kernel still has enough space there to
		 * allocate essential system resources. On other platforms,
		 * keep the crash kernel at the 128MB offset.
		 */
		if (firmware_has_feature(FW_FEATURE_LPAR))
			crashk_res.start = ppc64_rma_size / 2;
		else
			crashk_res.start = min(0x8000000ULL, (ppc64_rma_size / 2));
#else
		crashk_res.start = KDUMP_KERNELBASE;
#endif
	}

	crash_base = PAGE_ALIGN(crashk_res.start);
	if (crash_base != crashk_res.start) {
		printk("Crash kernel base must be aligned to 0x%lx\n",
				PAGE_SIZE);
		crashk_res.start = crash_base;
	}

#endif
	crash_size = PAGE_ALIGN(crash_size);
	crashk_res.end = crashk_res.start + crash_size - 1;

	/* The crash region must not overlap the current kernel */
	if (overlaps_crashkernel(__pa(_stext), _end - _stext)) {
		printk(KERN_WARNING
			"Crash kernel can not overlap current kernel\n");
		crashk_res.start = crashk_res.end = 0;
		return;
	}

	/* Crash kernel trumps memory limit */
	if (memory_limit && memory_limit <= crashk_res.end) {
		memory_limit = crashk_res.end + 1;
		total_mem_sz = memory_limit;
		printk("Adjusted memory limit for crashkernel, now 0x%llx\n",
		       memory_limit);
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
			"for crashkernel (System RAM: %ldMB)\n",
			(unsigned long)(crash_size >> 20),
			(unsigned long)(crashk_res.start >> 20),
			(unsigned long)(total_mem_sz >> 20));

	if (!memblock_is_region_memory(crashk_res.start, crash_size) ||
	    memblock_reserve(crashk_res.start, crash_size)) {
		pr_err("Failed to reserve memory for crashkernel!\n");
		crashk_res.start = crashk_res.end = 0;
		return;
	}
}

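/* Return true if [start, start + size) intersects the crash kernel region. */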
int overlaps_crashkernel(unsigned long start, unsigned long size)
{
	return (start + size) > crashk_res.start && start <= crashk_res.end;
}

/* Values we need to export to the second kernel via the device tree. */
static phys_addr_t kernel_end;
static phys_addr_t crashk_base;
static phys_addr_t crashk_size;
static unsigned long long mem_limit;

static struct property kernel_end_prop = {
	.name = "linux,kernel-end",
	.length = sizeof(phys_addr_t),
	.value = &kernel_end,
};

static struct property crashk_base_prop = {
	.name = "linux,crashkernel-base",
	.length = sizeof(phys_addr_t),
	.value = &crashk_base,
};

static struct property crashk_size_prop = {
	.name = "linux,crashkernel-size",
	.length = sizeof(phys_addr_t),
	.value = &crashk_size,
};

static struct property memory_limit_prop = {
	.name = "linux,memory-limit",
	.length = sizeof(unsigned long long),
	.value = &mem_limit,
};

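/*
 * cpu_to_be_ulong expands to cpu_to_be32() or cpu_to_be64() depending on
 * BITS_PER_LONG, so the values exported below are stored big-endian as
 * device tree properties expect.
 */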
#define cpu_to_be_ulong	__PASTE(cpu_to_be, BITS_PER_LONG)

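/*
 * Publish the crash kernel reservation and memory limit under the given
 * device-tree node so that kexec userspace (e.g. kexec-tools, which reads
 * them back via /proc/device-tree) can find the reserved region.
 */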
static void __init export_crashk_values(struct device_node *node)
{
	/*
	 * There might be existing crash kernel properties, but we can't
	 * be sure what's in them, so remove them.
	 */
	of_remove_property(node, of_find_property(node,
				"linux,crashkernel-base", NULL));
	of_remove_property(node, of_find_property(node,
				"linux,crashkernel-size", NULL));

	if (crashk_res.start != 0) {
		crashk_base = cpu_to_be_ulong(crashk_res.start);
		of_add_property(node, &crashk_base_prop);
		crashk_size = cpu_to_be_ulong(resource_size(&crashk_res));
		of_add_property(node, &crashk_size_prop);
	}

	/*
	 * memory_limit is required by the kexec-tools to limit the
	 * crash regions to the actual memory used.
	 */
	mem_limit = cpu_to_be_ulong(memory_limit);
	of_update_property(node, &memory_limit_prop);
}

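/*
 * Late initcall: record the end of the running kernel image and the crash
 * kernel reservation in the /chosen node, providing the information that
 * userspace kexec needs when the default_machine_kexec() path is used.
 */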
static int __init kexec_setup(void)
{
	struct device_node *node;

	node = of_find_node_by_path("/chosen");
	if (!node)
		return -ENOENT;

	/* remove any stale properties so ours can be found */
	of_remove_property(node, of_find_property(node, kernel_end_prop.name, NULL));

	/* information needed by userspace when using default_machine_kexec */
	kernel_end = cpu_to_be_ulong(__pa(_end));
	of_add_property(node, &kernel_end_prop);

	export_crashk_values(node);

	of_node_put(node);
	return 0;
}
late_initcall(kexec_setup);