/*
 * PPC64 code to handle Linux booting another kernel.
 *
 * Copyright (C) 2004-2005, IBM Corp.
 *
 * Created by: Milton D Miller II
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */


#include <linux/kexec.h>
#include <linux/smp.h>
#include <linux/thread_info.h>
#include <linux/init_task.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/hardirq.h>

#include <asm/page.h>
#include <asm/current.h>
#include <asm/machdep.h>
#include <asm/cacheflush.h>
#include <asm/paca.h>
#include <asm/mmu.h>
#include <asm/sections.h>	/* _end */
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/hw_breakpoint.h>

#ifdef CONFIG_PPC_BOOK3E
int default_machine_kexec_prepare(struct kimage *image)
{
	int i;
	/*
	 * Since we use the kernel fault handlers and paging code to
	 * handle the virtual mode, we must make sure no destination
	 * overlaps kernel static data or bss.
	 */
	for (i = 0; i < image->nr_segments; i++)
		if (image->segment[i].mem < __pa(_end))
			return -ETXTBSY;
	return 0;
}
#else
int default_machine_kexec_prepare(struct kimage *image)
{
	int i;
	unsigned long begin, end;	/* limits of segment */
	unsigned long low, high;	/* limits of blocked memory range */
	struct device_node *node;
	const unsigned long *basep;
	const unsigned int *sizep;

	if (!ppc_md.hpte_clear_all)
		return -ENOENT;

	/*
	 * Since we use the kernel fault handlers and paging code to
	 * handle the virtual mode, we must make sure no destination
	 * overlaps kernel static data or bss.
	 */
	for (i = 0; i < image->nr_segments; i++)
		if (image->segment[i].mem < __pa(_end))
			return -ETXTBSY;

	/*
	 * For non-LPAR, we absolutely cannot overwrite the mmu hash
	 * table, since we are still using the bolted entries in it to
	 * do the copy. Check that here.
	 *
	 * It is safe if the end is below the start of the blocked
	 * region (end <= low), or if the beginning is after the
	 * end of the blocked region (begin >= high). Use the
	 * boolean identity !(a || b) === (!a && !b).
	 */
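	/*
	 * A worked example with made-up addresses: if the hash table
	 * occupies [low, high) = [0x2000, 0x4000) and a segment covers
	 * [begin, end) = [0x3000, 0x5000), then begin < high and
	 * end > low both hold, so the segment overlaps and is rejected.
	 * A segment at [0x4000, 0x6000) has begin >= high and is safe.
	 */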
	if (htab_address) {
		low = __pa(htab_address);
		high = low + htab_size_bytes;

		for (i = 0; i < image->nr_segments; i++) {
			begin = image->segment[i].mem;
			end = begin + image->segment[i].memsz;

			if ((begin < high) && (end > low))
				return -ETXTBSY;
		}
	}

	/* We also should not overwrite the TCE tables */
	for_each_node_by_type(node, "pci") {
		basep = of_get_property(node, "linux,tce-base", NULL);
		sizep = of_get_property(node, "linux,tce-size", NULL);
		if (basep == NULL || sizep == NULL)
			continue;

		low = *basep;
		high = low + (*sizep);

		for (i = 0; i < image->nr_segments; i++) {
			begin = image->segment[i].mem;
			end = begin + image->segment[i].memsz;

			if ((begin < high) && (end > low))
				return -ETXTBSY;
		}
	}
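
	/*
	 * For illustration only: a hypothetical PCI node carrying the
	 * properties checked above might look like this in device tree
	 * source (the values are invented, not from real hardware):
	 *
	 *	pci@800000020000000 {
	 *		device_type = "pci";
	 *		linux,tce-base = <0x0 0x08000000>;
	 *		linux,tce-size = <0x00800000>;
	 *	};
	 */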

	return 0;
}
#endif /* !CONFIG_PPC_BOOK3E */

static void copy_segments(unsigned long ind)
{
	unsigned long entry;
	unsigned long *ptr;
	void *dest;
	void *addr;

	/*
	 * We rely on kexec_load to create a list that properly
	 * initializes these pointers before they are used.
	 * We will still crash if the list is wrong, but at least
	 * the compiler will be quiet.
	 */
	ptr = NULL;
	dest = NULL;

	for (entry = ind; !(entry & IND_DONE); entry = *ptr++) {
		addr = __va(entry & PAGE_MASK);

		switch (entry & IND_FLAGS) {
		case IND_DESTINATION:
			dest = addr;
			break;
		case IND_INDIRECTION:
			ptr = addr;
			break;
		case IND_SOURCE:
			copy_page(dest, addr);
			dest += PAGE_SIZE;
		}
	}
}
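
/*
 * For illustration, a minimal sketch of the list copy_segments()
 * walks (the entry values are invented): each entry is a physical
 * address with IND_* flag bits in its low bits.
 *
 *	dest_page  | IND_DESTINATION    set the running destination
 *	src_page_0 | IND_SOURCE         copy_page() to dest, dest += PAGE_SIZE
 *	src_page_1 | IND_SOURCE         copy_page() to the next dest page
 *	next_list  | IND_INDIRECTION    continue walking in another list page
 *	...
 *	0          | IND_DONE           terminate the walk
 */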

void kexec_copy_flush(struct kimage *image)
{
	long i, nr_segments = image->nr_segments;
	struct kexec_segment ranges[KEXEC_SEGMENT_MAX];

	/* save the ranges on the stack to efficiently flush the icache */
	memcpy(ranges, image->segment, sizeof(ranges));

	/*
	 * After this call we may not use anything allocated in dynamic
	 * memory, including *image.
	 *
	 * Only globals and the stack are allowed.
	 */
	copy_segments(image->head);

	/*
	 * We need to flush the icache for all destination pages at some
	 * point, including ones that were already in place in the
	 * original copy.
	 */
	for (i = 0; i < nr_segments; i++)
		flush_icache_range((unsigned long)__va(ranges[i].mem),
			(unsigned long)__va(ranges[i].mem + ranges[i].memsz));
}

#ifdef CONFIG_SMP

static int kexec_all_irq_disabled = 0;

static void kexec_smp_down(void *arg)
{
	local_irq_disable();
	hard_irq_disable();

	mb(); /* make sure our irqs are disabled before we say they are */
	get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;
	while (kexec_all_irq_disabled == 0)
		cpu_relax();
	mb(); /* make sure all irqs are disabled before this */
	hw_breakpoint_disable();
	/*
	 * Now every CPU has IRQs off, we can clear out any pending
	 * IPIs and be sure that no more will come in after this.
	 */
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 1);	/* crash_shutdown = 0, secondary = 1 */

	kexec_smp_wait();
	/* NOTREACHED */
}

static void kexec_prepare_cpus_wait(int wait_state)
{
	int my_cpu, i, notified = -1;

	hw_breakpoint_disable();
	my_cpu = get_cpu();
	/* Make sure each CPU has at least made it to the state we need.
	 *
	 * FIXME: There is a (slim) chance of a problem if not all of the CPUs
	 * are correctly onlined. If somehow we start a CPU on boot with RTAS
	 * start-cpu, but somehow that CPU doesn't write callin_cpu_map[] in
	 * time, the boot CPU will time out. If it does eventually execute
	 * stuff, the secondary will start up (paca[].cpu_start was written) and
	 * get into a peculiar state. If the platform supports
	 * smp_ops->take_timebase(), the secondary CPU will probably be spinning
	 * in there. If not (i.e. pseries), the secondary will continue on and
	 * try to online itself/idle/etc. If it survives that, we need to find
	 * these possible-but-not-online-but-should-be CPUs and chaperone them
	 * into kexec_smp_wait().
	 */
	for_each_online_cpu(i) {
		if (i == my_cpu)
			continue;

		while (paca[i].kexec_state < wait_state) {
			barrier();
			if (i != notified) {
				printk(KERN_INFO "kexec: waiting for cpu %d "
				       "(physical %d) to enter %i state\n",
				       i, paca[i].hw_cpu_id, wait_state);
				notified = i;
			}
		}
	}
	mb();
}
235
236 /*
237 * We need to make sure each present CPU is online. The next kernel will scan
238 * the device tree and assume primary threads are online and query secondary
239 * threads via RTAS to online them if required. If we don't online primary
240 * threads, they will be stuck. However, we also online secondary threads as we
241 * may be using 'cede offline'. In this case RTAS doesn't see the secondary
242 * threads as offline -- and again, these CPUs will be stuck.
243 *
244 * So, we online all CPUs that should be running, including secondary threads.
245 */
wake_offline_cpus(void)246 static void wake_offline_cpus(void)
247 {
248 int cpu = 0;
249
250 for_each_present_cpu(cpu) {
251 if (!cpu_online(cpu)) {
252 printk(KERN_INFO "kexec: Waking offline cpu %d.\n",
253 cpu);
254 WARN_ON(cpu_up(cpu));
255 }
256 }
257 }

static void kexec_prepare_cpus(void)
{
	wake_offline_cpus();
	smp_call_function(kexec_smp_down, NULL, /* wait */0);
	local_irq_disable();
	hard_irq_disable();

	mb(); /* make sure IRQs are disabled before we say they are */
	get_paca()->kexec_state = KEXEC_STATE_IRQS_OFF;

	kexec_prepare_cpus_wait(KEXEC_STATE_IRQS_OFF);
	/* we are sure every CPU has IRQs off at this point */
	kexec_all_irq_disabled = 1;

	/* after we tell the others to go down */
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 0);

	/*
	 * Before removing MMU mappings make sure all CPUs have entered real
	 * mode:
	 */
	kexec_prepare_cpus_wait(KEXEC_STATE_REAL_MODE);

	put_cpu();
}
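
/*
 * A sketch of the shutdown handshake implemented above (kexec_state
 * lives in each CPU's PACA; the REAL_MODE transition presumably
 * happens inside the assembly kexec_smp_wait()):
 *
 *	secondaries (kexec_smp_down)         kexec CPU (kexec_prepare_cpus)
 *	----------------------------         ------------------------------
 *	kexec_state = IRQS_OFF          ->   wait for all CPUs: IRQS_OFF
 *	spin on kexec_all_irq_disabled  <-   kexec_all_irq_disabled = 1
 *	kexec_smp_wait(): REAL_MODE     ->   wait for all CPUs: REAL_MODE
 */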

#else /* ! SMP */

static void kexec_prepare_cpus(void)
{
	/*
	 * move the secondaries to us so that we can copy
	 * the new kernel 0-0x100 safely
	 *
	 * do this in setup.c if kexec is configured?
	 *
	 * We need to release the cpus if we are ever going from a
	 * UP to an SMP kernel.
	 */
	smp_release_cpus();
	if (ppc_md.kexec_cpu_down)
		ppc_md.kexec_cpu_down(0, 0);
	local_irq_disable();
	hard_irq_disable();
}

#endif /* SMP */

/*
 * kexec thread structure and stack.
 *
 * We need to make sure that this is 16384-byte aligned due to the
 * way process stacks are handled. It also must be statically allocated
 * or allocated as part of the kimage, because everything else may be
 * overwritten when we copy the kexec image. We piggyback on the
 * "init_task" linker section here to statically allocate a stack.
 *
 * We could use a smaller stack if we don't care about anything using
 * current, but that audit has not been performed.
 */
static union thread_union kexec_stack __init_task_data =
	{ };
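
/*
 * For reference, thread_union (from <linux/sched.h> in kernels of this
 * vintage) overlays the thread_info at the base of the stack area:
 *
 *	union thread_union {
 *		struct thread_info thread_info;
 *		unsigned long stack[THREAD_SIZE/sizeof(long)];
 *	};
 *
 * so sizeof(kexec_stack) == THREAD_SIZE, and the "init_task" linker
 * section provides the 16384-byte alignment described above.
 */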

/*
 * For similar reasons to the stack above, the kexecing CPU needs to be on a
 * static PACA; we switch to kexec_paca.
 */
struct paca_struct kexec_paca;

/* Our assembly helper, in misc_64.S */
extern void kexec_sequence(void *newstack, unsigned long start,
			   void *image, void *control,
			   void (*clear_all)(void)) __noreturn;

/* too late to fail here */
void default_machine_kexec(struct kimage *image)
{
	/* prepare control code if any */

	/*
	 * If this is a normal kexec boot, we need to shut the other CPUs
	 * down into our wait loop and quiesce interrupts.
	 * In the crash case (crashing_cpu >= 0), stopping the other CPUs
	 * and collecting their pt_regs has already been done, via the
	 * debugger IPI.
	 */

	if (!kdump_in_progress())
		kexec_prepare_cpus();

	pr_debug("kexec: Starting switchover sequence.\n");

	/* switch to a statically allocated stack. Based on irq stack code.
	 * We set up preempt_count to avoid using VMX in memcpy.
	 * XXX: the task struct will likely be invalid once we do the copy!
	 */
	kexec_stack.thread_info.task = current_thread_info()->task;
	kexec_stack.thread_info.flags = 0;
	kexec_stack.thread_info.preempt_count = HARDIRQ_OFFSET;
	kexec_stack.thread_info.cpu = current_thread_info()->cpu;

	/* We need a static PACA, too; copy this CPU's PACA over and switch to
	 * it. Also poison per_cpu_offset to catch anyone using non-static
	 * data.
	 */
	memcpy(&kexec_paca, get_paca(), sizeof(struct paca_struct));
	kexec_paca.data_offset = 0xedeaddeadeeeeeeeUL;
	paca = (struct paca_struct *)RELOC_HIDE(&kexec_paca, 0) -
		kexec_paca.paca_index;
	setup_paca(&kexec_paca);

	/* XXX: If anyone does 'dynamic lppacas' this will also need to be
	 * switched to a static version!
	 */

	/* Some things are best done in assembly. Finding globals with
	 * a toc is easier in C, so pass in what we can.
	 */
	kexec_sequence(&kexec_stack, image->start, image,
		       page_address(image->control_code_page),
		       ppc_md.hpte_clear_all);
	/* NOTREACHED */
}

#ifndef CONFIG_PPC_BOOK3E
/* Values we need to export to the second kernel via the device tree. */
static unsigned long htab_base;
static unsigned long htab_size;

static struct property htab_base_prop = {
	.name = "linux,htab-base",
	.length = sizeof(unsigned long),
	.value = &htab_base,
};

static struct property htab_size_prop = {
	.name = "linux,htab-size",
	.length = sizeof(unsigned long),
	.value = &htab_size,
};

static int __init export_htab_values(void)
{
	struct device_node *node;
	struct property *prop;

	/* On machines with no htab, htab_address is NULL */
	if (!htab_address)
		return -ENODEV;

	node = of_find_node_by_path("/chosen");
	if (!node)
		return -ENODEV;

	/* remove any stale properties so ours can be found */
	prop = of_find_property(node, htab_base_prop.name, NULL);
	if (prop)
		of_remove_property(node, prop);
	prop = of_find_property(node, htab_size_prop.name, NULL);
	if (prop)
		of_remove_property(node, prop);

	htab_base = cpu_to_be64(__pa(htab_address));
	of_add_property(node, &htab_base_prop);
	htab_size = cpu_to_be64(htab_size_bytes);
	of_add_property(node, &htab_size_prop);

	of_node_put(node);
	return 0;
}
late_initcall(export_htab_values);
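
/*
 * For illustration only: a consumer in the next kernel (or a tool
 * inspecting the flattened tree) could locate these values roughly as
 * below; this is a sketch, not code from this tree, and error handling
 * is elided:
 *
 *	struct device_node *chosen = of_find_node_by_path("/chosen");
 *	const __be64 *base = of_get_property(chosen, "linux,htab-base", NULL);
 *	if (base)
 *		htab_addr = be64_to_cpu(*base);
 *	of_node_put(chosen);
 */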
#endif /* !CONFIG_PPC_BOOK3E */