/*
 * PPC64 code to handle Linux booting another kernel.
 *
 * Copyright (C) 2004-2005, IBM Corp.
 *
 * Created by: Milton D Miller II
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */


#include <linux/kexec.h>
#include <linux/smp.h>
#include <linux/thread_info.h>
#include <linux/init_task.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/hardirq.h>

#include <asm/page.h>
#include <asm/current.h>
#include <asm/machdep.h>
#include <asm/cacheflush.h>
#include <asm/paca.h>
#include <asm/mmu.h>
#include <asm/sections.h>	/* _end */
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/hw_breakpoint.h>

int default_machine_kexec_prepare(struct kimage *image)
{
	int i;
	unsigned long begin, end;	/* limits of segment */
	unsigned long low, high;	/* limits of blocked memory range */
	struct device_node *node;
	const unsigned long *basep;
	const unsigned int *sizep;

	if (!ppc_md.hpte_clear_all)
		return -ENOENT;

	/*
	 * Since we use the kernel fault handlers and paging code to
	 * handle the virtual mode, we must make sure no destination
	 * overlaps kernel static data or bss.
	 */
	for (i = 0; i < image->nr_segments; i++)
		if (image->segment[i].mem < __pa(_end))
			return -ETXTBSY;

	/*
	 * For non-LPAR, we absolutely cannot overwrite the MMU hash
	 * table, since we are still using the bolted entries in it to
	 * do the copy.  Check that here.
	 *
	 * It is safe if the end is below the start of the blocked
	 * region (end <= low), or if the beginning is after the
	 * end of the blocked region (begin >= high).  Use the
	 * boolean identity !(a || b) === (!a && !b).
	 */
	if (htab_address) {
		low = __pa(htab_address);
		high = low + htab_size_bytes;

		for (i = 0; i < image->nr_segments; i++) {
			begin = image->segment[i].mem;
			end = begin + image->segment[i].memsz;

			if ((begin < high) && (end > low))
				return -ETXTBSY;
		}
	}

	/* We also must not overwrite the TCE tables */
	for_each_node_by_type(node, "pci") {
		basep = of_get_property(node, "linux,tce-base", NULL);
		sizep = of_get_property(node, "linux,tce-size", NULL);
		if (basep == NULL || sizep == NULL)
			continue;

		low = *basep;
		high = low + (*sizep);

		for (i = 0; i < image->nr_segments; i++) {
			begin = image->segment[i].mem;
			end = begin + image->segment[i].memsz;

			if ((begin < high) && (end > low))
				return -ETXTBSY;
		}
	}

	return 0;
}

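/* All of the kimage indirection-entry flag bits, for decoding each entry. */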
#define IND_FLAGS (IND_DESTINATION | IND_INDIRECTION | IND_DONE | IND_SOURCE)

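/*
 * Walk the indirection list built by kexec_load and copy each source
 * page to its destination.
 */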
static void copy_segments(unsigned long ind)
{
	unsigned long entry;
	unsigned long *ptr;
	void *dest;
	void *addr;

	/*
	 * We rely on kexec_load to create a list that properly
	 * initializes these pointers before they are used.
	 * We will still crash if the list is wrong, but at least
	 * the compiler will be quiet.
	 */
	ptr = NULL;
	dest = NULL;

	for (entry = ind; !(entry & IND_DONE); entry = *ptr++) {
		addr = __va(entry & PAGE_MASK);

		switch (entry & IND_FLAGS) {
		case IND_DESTINATION:
			dest = addr;
			break;
		case IND_INDIRECTION:
			ptr = addr;
			break;
		case IND_SOURCE:
			copy_page(dest, addr);
			dest += PAGE_SIZE;
		}
	}
}

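/*
 * Copy the image segments into place and flush the icache for all
 * destination pages.  Once the copy starts, dynamically allocated
 * memory (including *image) may no longer be used.
 */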
void kexec_copy_flush(struct kimage *image)
{
	long i, nr_segments = image->nr_segments;
	struct kexec_segment ranges[KEXEC_SEGMENT_MAX];

	/* save the ranges on the stack to efficiently flush the icache */
	memcpy(ranges, image->segment, sizeof(ranges));

	/*
	 * After this call we may not use anything allocated in dynamic
	 * memory, including *image.
	 *
	 * Only globals and the stack are allowed.
	 */
	copy_segments(image->head);

	/*
	 * We need to clear the icache for all destination pages at some
	 * point, including ones that were already in place before the
	 * copy.
	 */
	for (i = 0; i < nr_segments; i++)
		flush_icache_range((unsigned long)__va(ranges[i].mem),
			(unsigned long)__va(ranges[i].mem + ranges[i].memsz));
}

#ifdef CONFIG_SMP

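/* Set by the kexecing CPU once every other CPU has disabled its interrupts. */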
static int kexec_all_irq_disabled = 0;

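/*
 * IPI handler run on each other CPU: disable interrupts, report
 * KEXEC_STATE_IRQS_OFF, wait for the kexecing CPU's signal, then
 * park in kexec_smp_wait() (never returns).
 */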
static void kexec_smp_down(void *arg)
{
	local_irq_disable();
	hard_irq_disable();

	mb(); /* make sure our irqs are disabled before we say they are */
	get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
	while (kexec_all_irq_disabled == 0)
		cpu_relax();
	mb(); /* make sure all irqs are disabled before this */
	hw_breakpoint_disable();
	/*
	 * Now that every CPU has IRQs off, we can clear out any pending
	 * IPIs and be sure that no more will come in after this.
	 */
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 1);

	kexec_smp_wait();
	/* NOTREACHED */
}

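/* Spin until every other online CPU has advanced at least to wait_state. */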
static void kexec_prepare_cpus_wait(int wait_state)
{
	int my_cpu, i, notified = -1;

	hw_breakpoint_disable();
	my_cpu = get_cpu();
	/* Make sure each CPU has at least made it to the state we need.
	 *
	 * FIXME: There is a (slim) chance of a problem if not all of the CPUs
	 * are correctly onlined.  If somehow we start a CPU on boot with RTAS
	 * start-cpu, but somehow that CPU doesn't write callin_cpu_map[] in
	 * time, the boot CPU will time out.  If it does eventually execute
	 * stuff, the secondary will start up (paca[].cpu_start was written) and
	 * get into a peculiar state.  If the platform supports
	 * smp_ops->take_timebase(), the secondary CPU will probably be spinning
	 * in there.  If not (i.e. pseries), the secondary will continue on and
	 * try to online itself/idle/etc. If it survives that, we need to find
	 * these possible-but-not-online-but-should-be CPUs and chaperone them
	 * into kexec_smp_wait().
	 */
	for_each_online_cpu(i) {
		if (i == my_cpu)
			continue;

		while (paca[i].kexec_state < wait_state) {
			barrier();
			if (i != notified) {
				printk(KERN_INFO "kexec: waiting for cpu %d "
				       "(physical %d) to enter %i state\n",
				       i, paca[i].hw_cpu_id, wait_state);
				notified = i;
			}
		}
	}
	mb();
}

/*
 * We need to make sure each present CPU is online.  The next kernel will scan
 * the device tree and assume primary threads are online and query secondary
 * threads via RTAS to online them if required.  If we don't online primary
 * threads, they will be stuck.  However, we also online secondary threads as we
 * may be using 'cede offline'.  In this case RTAS doesn't see the secondary
 * threads as offline -- and again, these CPUs will be stuck.
 *
 * So, we online all CPUs that should be running, including secondary threads.
 */
static void wake_offline_cpus(void)
{
	int cpu = 0;

	for_each_present_cpu(cpu) {
		if (!cpu_online(cpu)) {
			printk(KERN_INFO "kexec: Waking offline cpu %d.\n",
			       cpu);
			WARN_ON(cpu_up(cpu));
		}
	}
}

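/*
 * Shut down the other CPUs for kexec: bring every present CPU online,
 * send them all into kexec_smp_down(), and wait until each one has
 * entered real mode.
 */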
static void kexec_prepare_cpus(void)
{
	wake_offline_cpus();
	smp_call_function(kexec_smp_down, NULL, /* wait */0);
	local_irq_disable();
	hard_irq_disable();

	mb(); /* make sure IRQs are disabled before we say they are */
	get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;

	kexec_prepare_cpus_wait(KEXEC_STATE_IRQS_OFF);
	/* we are sure every CPU has IRQs off at this point */
	kexec_all_irq_disabled = 1;

	/* after we tell the others to go down */
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 0);

	/*
	 * Before removing MMU mappings make sure all CPUs have entered real
	 * mode:
	 */
	kexec_prepare_cpus_wait(KEXEC_STATE_REAL_MODE);

	put_cpu();
}

#else /* ! SMP */

static void kexec_prepare_cpus(void)
{
	/*
	 * move the secondaries to us so that we can copy
	 * the new kernel 0-0x100 safely
	 *
	 * do this if kexec in setup.c?
	 *
	 * We need to release the CPUs if we are ever going from a
	 * UP to an SMP kernel.
	 */
	smp_release_cpus();
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 0);
	local_irq_disable();
	hard_irq_disable();
}

#endif /* SMP */

/*
 * kexec thread structure and stack.
 *
 * We need to make sure that this is 16384-byte aligned due to the
 * way process stacks are handled.  It also must be statically allocated
 * or allocated as part of the kimage, because everything else may be
 * overwritten when we copy the kexec image.  We piggyback on the
 * "init_task" linker section here to statically allocate a stack.
 *
 * We could use a smaller stack if we don't care about anything using
 * current, but that audit has not been performed.
 */
static union thread_union kexec_stack __init_task_data =
	{ };

/*
 * For similar reasons to the stack above, the kexecing CPU needs to be on a
 * static PACA; we switch to kexec_paca.
 */
struct paca_struct kexec_paca;

/* Our assembly helper, in misc_64.S */
extern void kexec_sequence(void *newstack, unsigned long start,
			   void *image, void *control,
			   void (*clear_all)(void)) __noreturn;

/* too late to fail here */
void default_machine_kexec(struct kimage *image)
{
	/* prepare control code if any */

	/*
	 * If this is a normal kexec boot, we need to shut down the other
	 * CPUs into our wait loop and quiesce interrupts.
	 * Otherwise, in the crash case (crashing_cpu >= 0), the other
	 * CPUs have already been stopped and their pt_regs collected
	 * via the debugger IPI.
	 */

	if (crashing_cpu == -1)
		kexec_prepare_cpus();

	pr_debug("kexec: Starting switchover sequence.\n");

	/*
	 * Switch to a statically allocated stack, based on the irq stack
	 * code.  We set preempt_count to avoid using VMX in memcpy.
	 * XXX: the task struct will likely be invalid once we do the copy!
	 */
	kexec_stack.thread_info.task = current_thread_info()->task;
	kexec_stack.thread_info.flags = 0;
	kexec_stack.thread_info.preempt_count = HARDIRQ_OFFSET;
	kexec_stack.thread_info.cpu = current_thread_info()->cpu;

	/*
	 * We need a static PACA too; copy this CPU's PACA over and switch
	 * to it.  Also poison per_cpu_offset to catch anyone using
	 * non-static data.
	 */
	memcpy(&kexec_paca, get_paca(), sizeof(struct paca_struct));
	kexec_paca.data_offset = 0xedeaddeadeeeeeeeUL;
	paca = (struct paca_struct *)RELOC_HIDE(&kexec_paca, 0) -
		kexec_paca.paca_index;
	setup_paca(&kexec_paca);

	/*
	 * XXX: If anyone does 'dynamic lppacas' this will also need to be
	 * switched to a static version!
	 */

	/*
	 * Some things are best done in assembly.  Finding globals with
	 * a toc is easier in C, so pass in what we can.
	 */
	kexec_sequence(&kexec_stack, image->start, image,
			page_address(image->control_code_page),
			ppc_md.hpte_clear_all);
	/* NOTREACHED */
}

/* Values we need to export to the second kernel via the device tree. */
static unsigned long htab_base;
static unsigned long htab_size;

static struct property htab_base_prop = {
	.name = "linux,htab-base",
	.length = sizeof(unsigned long),
	.value = &htab_base,
};

static struct property htab_size_prop = {
	.name = "linux,htab-size",
	.length = sizeof(unsigned long),
	.value = &htab_size,
};

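/*
 * Publish the hash table's base and size under /chosen so the second
 * kernel can find them.
 */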
static int __init export_htab_values(void)
{
	struct device_node *node;
	struct property *prop;

	/* On machines with no htab, htab_address is NULL */
	if (!htab_address)
		return -ENODEV;

	node = of_find_node_by_path("/chosen");
	if (!node)
		return -ENODEV;

	/* remove any stale properties so ours can be found */
	prop = of_find_property(node, htab_base_prop.name, NULL);
	if (prop)
		of_remove_property(node, prop);
	prop = of_find_property(node, htab_size_prop.name, NULL);
	if (prop)
		of_remove_property(node, prop);

	htab_base = cpu_to_be64(__pa(htab_address));
	of_add_property(node, &htab_base_prop);
	htab_size = cpu_to_be64(htab_size_bytes);
	of_add_property(node, &htab_size_prop);

	of_node_put(node);
	return 0;
}
late_initcall(export_htab_values);