/*
 * Derived from "arch/i386/kernel/process.c"
 *   Copyright (C) 1995 Linus Torvalds
 *
 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 * Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * PowerPC version
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/kernel_stat.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/syscalls.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#endif
#include <linux/kprobes.h>
#include <linux/kdebug.h>

extern unsigned long _get_SP(void);

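/*
 * On uniprocessor kernels we switch FP/VMX/VSX/SPE state lazily: the
 * registers are left live in the CPU and these pointers remember which
 * task owns them, so we only trap and save when another task actually
 * touches the unit.  (Descriptive note; on SMP the state is saved
 * eagerly in __switch_to() below and these variables are not built.)
 */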
#ifndef CONFIG_SMP
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_vsx = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif

/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct.  That process could take over the
		 * FPU, and then when we get scheduled again we would store
		 * bogus values for the remaining FP registers.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
			/*
			 * This should only ever be called for current or
			 * for a stopped child process.  Since we save away
			 * the FP register state on context switch on SMP,
			 * there is something wrong if a stopped child appears
			 * to still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);
#endif
			giveup_fpu(tsk);
		}
		preempt_enable();
	}
}

void enable_kernel_fp(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
		giveup_fpu(current);
	else
		giveup_fpu(NULL);	/* just enables FP for kernel */
#else
	giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);
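
/*
 * A minimal usage sketch (hypothetical caller, not code from this file):
 * kernel code that wants to use the FPU must hold off preemption for as
 * long as MSR_FP is enabled for it, e.g.
 *
 *	preempt_disable();
 *	enable_kernel_fp();
 *	... use FP registers ...
 *	preempt_enable();
 */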

#ifdef CONFIG_ALTIVEC
void enable_kernel_altivec(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
		giveup_altivec(current);
	else
		giveup_altivec(NULL);	/* just enable AltiVec for kernel - force */
#else
	giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/AltiVec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_altivec(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
#if 0
/* not currently used, but some crazy RAID module might want to later */
void enable_kernel_vsx(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
		giveup_vsx(current);
	else
		giveup_vsx(NULL);	/* just enable vsx for kernel - force */
#else
	giveup_vsx(last_task_used_vsx);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_vsx);
#endif

void giveup_vsx(struct task_struct *tsk)
{
	giveup_fpu(tsk);
	giveup_altivec(tsk);
	__giveup_vsx(tsk);
}

void flush_vsx_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VSX) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_vsx(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE

void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
		giveup_spe(current);
	else
		giveup_spe(NULL);	/* just enable SPE for kernel - force */
#else
	giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);

void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_spe(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_SPE */

#ifndef CONFIG_SMP
/*
 * If we are doing lazy switching of CPU state (FP, VMX, VSX or SPE),
 * and the current task has some state, discard it.
 */
void discard_lazy_cpu_state(void)
{
	preempt_disable();
	if (last_task_used_math == current)
		last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (last_task_used_vsx == current)
		last_task_used_vsx = NULL;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	if (last_task_used_spe == current)
		last_task_used_spe = NULL;
#endif
	preempt_enable();
}
#endif /* CONFIG_SMP */

void do_dabr(struct pt_regs *regs, unsigned long address,
	     unsigned long error_code)
{
	siginfo_t info;

	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	if (debugger_dabr_match(regs))
		return;

	/* Clear the DAC and struct entries.  One-shot trigger. */
#if defined(CONFIG_BOOKE)
	mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~(DBSR_DAC1R | DBSR_DAC1W
							| DBCR0_IDM));
#endif

	/* Clear the DABR */
	set_dabr(0);

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}

static DEFINE_PER_CPU(unsigned long, current_dabr);

int set_dabr(unsigned long dabr)
{
	__get_cpu_var(current_dabr) = dabr;

	if (ppc_md.set_dabr)
		return ppc_md.set_dabr(dabr);

	/* XXX should we have a CPU_FTR_HAS_DABR ? */
#if defined(CONFIG_PPC64) || defined(CONFIG_6xx)
	mtspr(SPRN_DABR, dabr);
#endif

#if defined(CONFIG_BOOKE)
	mtspr(SPRN_DAC1, dabr);
#endif

	return 0;
}
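
/*
 * A hypothetical sketch of how a caller (e.g. the ptrace path) would arm
 * a write watchpoint via set_dabr(); the DABR_* flag names are an
 * assumption here, not defined in this file:
 *
 *	tsk->thread.dabr = addr | DABR_TRANSLATION | DABR_DATA_WRITE;
 *	if (tsk == current)
 *		set_dabr(tsk->thread.dabr);
 *
 * Otherwise __switch_to() below installs it when the task next runs.
 */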

#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif

struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
{
	struct thread_struct *new_thread, *old_thread;
	unsigned long flags;
	struct task_struct *last;

#ifdef CONFIG_SMP
	/* avoid complexity of lazy save/restore of fpu
	 * by just saving it every time we switch out if
	 * this task used the fpu during the last quantum.
	 *
	 * If it tries to use the fpu again, it'll trap and
	 * reload its fp regs.  So we don't have to do a restore
	 * every switch, just a save.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
		giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
	/*
	 * If the previous thread used altivec in the last quantum
	 * (thus changing altivec regs) then save them.
	 * We used to check the VRSAVE register but not all apps
	 * set it, so we don't rely on it now (and in fact we need
	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
	 *
	 * On SMP we always save/restore altivec regs just to avoid the
	 * complexity of changing processors.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
		giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
		/* VMX and FPU registers are already saved here */
		__giveup_vsx(prev);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/*
	 * If the previous thread used spe in the last quantum
	 * (thus changing spe regs) then save them.
	 *
	 * On SMP we always save/restore spe regs just to avoid the
	 * complexity of changing processors.
	 */
	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
		giveup_spe(prev);
#endif /* CONFIG_SPE */

#else  /* CONFIG_SMP */
#ifdef CONFIG_ALTIVEC
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_altivec -- Cort
	 */
	if (new->thread.regs && last_task_used_altivec == new)
		new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (new->thread.regs && last_task_used_vsx == new)
		new->thread.regs->msr |= MSR_VSX;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_spe
	 */
	if (new->thread.regs && last_task_used_spe == new)
		new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */

#endif /* CONFIG_SMP */

	if (unlikely(__get_cpu_var(current_dabr) != new->thread.dabr))
		set_dabr(new->thread.dabr);

#if defined(CONFIG_BOOKE)
	/* If new thread DAC (HW breakpoint) is the same then leave it */
	if (new->thread.dabr)
		set_dabr(new->thread.dabr);
#endif

	new_thread = &new->thread;
	old_thread = &current->thread;

#ifdef CONFIG_PPC64
	/*
	 * Collect processor utilization data per process
	 */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
		unsigned long start_tb, current_tb;
		start_tb = old_thread->start_tb;
		cu->current_tb = current_tb = mfspr(SPRN_PURR);
		old_thread->accum_tb += (current_tb - start_tb);
		new_thread->start_tb = current_tb;
	}
#endif

	local_irq_save(flags);

	account_system_vtime(current);
	account_process_vtime(current);
	calculate_steal_time();

	/*
	 * We can't take a PMU exception inside _switch() since there is a
	 * window where the kernel stack SLB and the kernel stack are out
	 * of sync.  Hard disable here.
	 */
	hard_irq_disable();
	last = _switch(old_thread, new_thread);

	local_irq_restore(flags);

	return last;
}

static int instructions_to_print = 16;

static void show_instructions(struct pt_regs *regs)
{
	int i;
	unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
			sizeof(int));

	printk("Instruction dump:");

	for (i = 0; i < instructions_to_print; i++) {
		int instr;

		if (!(i % 8))
			printk("\n");

#if !defined(CONFIG_BOOKE)
		/* If executing with the IMMU off, adjust pc rather
		 * than print XXXXXXXX.
		 */
		if (!(regs->msr & MSR_IR))
			pc = (unsigned long)phys_to_virt(pc);
#endif

		/* We use __get_user here *only* to avoid an OOPS on a
		 * bad address because the pc *should* only be a
		 * kernel address.
		 */
		if (!__kernel_text_address(pc) ||
		    __get_user(instr, (unsigned int __user *)pc)) {
			printk("XXXXXXXX ");
		} else {
			if (regs->nip == pc)
				printk("<%08x> ", instr);
			else
				printk("%08x ", instr);
		}

		pc += sizeof(int);
	}

	printk("\n");
}

static struct regbit {
	unsigned long bit;
	const char *name;
} msr_bits[] = {
	{MSR_EE,  "EE"},
	{MSR_PR,  "PR"},
	{MSR_FP,  "FP"},
	{MSR_VEC, "VEC"},
	{MSR_VSX, "VSX"},
	{MSR_ME,  "ME"},
	{MSR_CE,  "CE"},
	{MSR_DE,  "DE"},
	{MSR_IR,  "IR"},
	{MSR_DR,  "DR"},
	{0,       NULL}
};

static void printbits(unsigned long val, struct regbit *bits)
{
	const char *sep = "";

	printk("<");
	for (; bits->bit; ++bits)
		if (val & bits->bit) {
			printk("%s%s", sep, bits->name);
			sep = ",";
		}
	printk(">");
}
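
/*
 * For example, an MSR value with EE, ME, IR and DR set would be printed
 * by printbits(val, msr_bits) as "<EE,ME,IR,DR>".
 */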

#ifdef CONFIG_PPC64
#define REG		"%016lx"
#define REGS_PER_LINE	4
#define LAST_VOLATILE	13
#else
#define REG		"%08lx"
#define REGS_PER_LINE	8
#define LAST_VOLATILE	12
#endif

void show_regs(struct pt_regs *regs)
{
	int i, trap;

	printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
	       regs->nip, regs->link, regs->ctr);
	printk("REGS: %p TRAP: %04lx %s (%s)\n",
	       regs, regs->trap, print_tainted(), init_utsname()->release);
	printk("MSR: "REG" ", regs->msr);
	printbits(regs->msr, msr_bits);
	printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
	trap = TRAP(regs);
	if (trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
		printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
#else
		printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr);
#endif
	printk("TASK = %p[%d] '%s' THREAD: %p",
	       current, task_pid_nr(current), current->comm,
	       task_thread_info(current));

#ifdef CONFIG_SMP
	printk(" CPU: %d", raw_smp_processor_id());
#endif /* CONFIG_SMP */

	for (i = 0; i < 32; i++) {
		if ((i % REGS_PER_LINE) == 0)
			printk("\n" KERN_INFO "GPR%02d: ", i);
		printk(REG " ", regs->gpr[i]);
		if (i == LAST_VOLATILE && !FULL_REGS(regs))
			break;
	}
	printk("\n");
#ifdef CONFIG_KALLSYMS
	/*
	 * Lookup NIP late so we have the best chance of getting the
	 * above info out without failing
	 */
	printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
	printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
	show_stack(current, (unsigned long *) regs->gpr[1]);
	if (!user_mode(regs))
		show_instructions(regs);
}

void exit_thread(void)
{
	discard_lazy_cpu_state();
}

void flush_thread(void)
{
#ifdef CONFIG_PPC64
	struct thread_info *t = current_thread_info();

	if (test_ti_thread_flag(t, TIF_ABI_PENDING)) {
		clear_ti_thread_flag(t, TIF_ABI_PENDING);
		if (test_ti_thread_flag(t, TIF_32BIT))
			clear_ti_thread_flag(t, TIF_32BIT);
		else
			set_ti_thread_flag(t, TIF_32BIT);
	}
#endif

	discard_lazy_cpu_state();

	if (current->thread.dabr) {
		current->thread.dabr = 0;
		set_dabr(0);

#if defined(CONFIG_BOOKE)
		current->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W);
#endif
	}
}

void
release_thread(struct task_struct *t)
{
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_vsx_to_thread(current);
	flush_spe_to_thread(current);
}

/*
 * Copy a thread.
 */
int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
		unsigned long unused, struct task_struct *p,
		struct pt_regs *regs)
{
	struct pt_regs *childregs, *kregs;
	extern void ret_from_fork(void);
	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;

	CHECK_FULL_REGS(regs);
	/* Copy registers */
	sp -= sizeof(struct pt_regs);
	childregs = (struct pt_regs *) sp;
	*childregs = *regs;
	if ((childregs->msr & MSR_PR) == 0) {
		/* for kernel thread, set `current' and stackptr in new task */
		childregs->gpr[1] = sp + sizeof(struct pt_regs);
#ifdef CONFIG_PPC32
		childregs->gpr[2] = (unsigned long) p;
#else
		clear_tsk_thread_flag(p, TIF_32BIT);
#endif
		p->thread.regs = NULL;	/* no user register state */
	} else {
		childregs->gpr[1] = usp;
		p->thread.regs = childregs;
		if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
			if (!test_thread_flag(TIF_32BIT))
				childregs->gpr[13] = childregs->gpr[6];
			else
#endif
				childregs->gpr[2] = childregs->gpr[6];
		}
	}
	childregs->gpr[3] = 0;	/* Result from fork() */
	sp -= STACK_FRAME_OVERHEAD;

	/*
	 * The way this works is that at some point in the future
	 * some task will call _switch to switch to the new task.
	 * That will pop off the stack frame created below and start
	 * the new task running at ret_from_fork.  The new task will
	 * do some housekeeping and then return from the fork or clone
	 * system call, using the stack frame created above.
	 */
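	/*
	 * A rough sketch of the child's kernel stack as laid out by this
	 * function (higher addresses at the top; an illustration derived
	 * from the pointer arithmetic here, not authoritative):
	 *
	 *	task_stack_page(p) + THREAD_SIZE
	 *	+------------------------------+
	 *	| childregs (struct pt_regs)   |  user register image
	 *	+------------------------------+
	 *	| STACK_FRAME_OVERHEAD         |
	 *	+------------------------------+
	 *	| kregs (struct pt_regs)       |  nip = ret_from_fork
	 *	+------------------------------+
	 *	| STACK_FRAME_OVERHEAD         |  <- p->thread.ksp
	 *	+------------------------------+
	 */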
	sp -= sizeof(struct pt_regs);
	kregs = (struct pt_regs *) sp;
	sp -= STACK_FRAME_OVERHEAD;
	p->thread.ksp = sp;
	p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
				_ALIGN_UP(sizeof(struct thread_info), 16);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_SLB)) {
		unsigned long sp_vsid;
		unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

		if (cpu_has_feature(CPU_FTR_1T_SEGMENT))
			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
				<< SLB_VSID_SHIFT_1T;
		else
			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT;
		sp_vsid |= SLB_VSID_KERNEL | llp;
		p->thread.ksp_vsid = sp_vsid;
	}

	/*
	 * The PPC64 ABI makes use of a TOC to contain function
	 * pointers.  The symbol (here ret_from_fork) is actually a
	 * pointer to the function descriptor, whose first entry is a
	 * pointer to the actual entry point.
	 */
	kregs->nip = *((unsigned long *)ret_from_fork);
#else
	kregs->nip = (unsigned long)ret_from_fork;
#endif

	return 0;
}

/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */
#endif

	set_fs(USER_DS);

	/*
	 * If we exec out of a kernel thread then thread.regs will not be
	 * set.  Do it now.
	 */
	if (!current->thread.regs) {
		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
		current->thread.regs = regs - 1;
	}

	memset(regs->gpr, 0, sizeof(regs->gpr));
	regs->ctr = 0;
	regs->link = 0;
	regs->xer = 0;
	regs->ccr = 0;
	regs->gpr[1] = sp;

	/*
	 * We have just cleared all the nonvolatile GPRs, so make
	 * FULL_REGS(regs) return true.  This is necessary to allow
	 * ptrace to examine the thread immediately after exec.
	 */
	regs->trap &= ~1UL;

#ifdef CONFIG_PPC32
	regs->mq = 0;
	regs->nip = start;
	regs->msr = MSR_USER;
#else
	if (!test_thread_flag(TIF_32BIT)) {
		unsigned long entry, toc;

		/* start is a relocated pointer to the function descriptor for
		 * the elf _start routine.  The first entry in the function
		 * descriptor is the entry address of _start and the second
		 * entry is the TOC value we need to use.
		 */
		__get_user(entry, (unsigned long __user *)start);
		__get_user(toc, (unsigned long __user *)start+1);

		/* Check whether the e_entry function descriptor entries
		 * need to be relocated before we can use them.
		 */
		if (load_addr != 0) {
			entry += load_addr;
			toc += load_addr;
		}
		regs->nip = entry;
		regs->gpr[2] = toc;
		regs->msr = MSR_USER64;
	} else {
		regs->nip = start;
		regs->gpr[2] = 0;
		regs->msr = MSR_USER32;
	}
#endif

	discard_lazy_cpu_state();
#ifdef CONFIG_VSX
	current->thread.used_vsr = 0;
#endif
	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
	current->thread.fpscr.val = 0;
#ifdef CONFIG_ALTIVEC
	memset(current->thread.vr, 0, sizeof(current->thread.vr));
	memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
	current->thread.vscr.u[3] = 0x00010000;	/* Java mode disabled */
	current->thread.vrsave = 0;
	current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	memset(current->thread.evr, 0, sizeof(current->thread.evr));
	current->thread.acc = 0;
	current->thread.spefscr = 0;
	current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
}

#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
		| PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	/* This is a bit hairy.  If we are an SPE enabled processor
	 * (have embedded fp) we store the IEEE exception enable flags in
	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
	 * mode (async, precise, disabled) for 'Classic' FP. */
	if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			tsk->thread.fpexc_mode = val &
				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
			return 0;
		} else {
			return -EINVAL;
		}
#else
		return -EINVAL;
#endif
	}

	/* on a CONFIG_SPE this does not hurt us.  The bits that
	 * __pack_fe01 use do not overlap with bits used for
	 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
	 * on CONFIG_SPE implementations are reserved so writing to
	 * them does not change anything */
	if (val > PR_FP_EXC_PRECISE)
		return -EINVAL;
	tsk->thread.fpexc_mode = __pack_fe01(val);
	if (regs != NULL && (regs->msr & MSR_FP) != 0)
		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
			| tsk->thread.fpexc_mode;
	return 0;
}
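
/*
 * A minimal userspace sketch of driving the functions above and below
 * through prctl(2) (illustrative, not code from this file):
 *
 *	#include <sys/prctl.h>
 *
 *	unsigned int mode;
 *	prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);	    // precise trapping mode
 *	prctl(PR_GET_FPEXC, (unsigned long)&mode);  // read the mode back
 */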

int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
	unsigned int val;

	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE))
			val = tsk->thread.fpexc_mode;
		else
			return -EINVAL;
#else
		return -EINVAL;
#endif
	else
		val = __unpack_fe01(tsk->thread.fpexc_mode);
	return put_user(val, (unsigned int __user *) adr);
}

int set_endian(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (val == PR_ENDIAN_BIG)
		regs->msr &= ~MSR_LE;
	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
		regs->msr |= MSR_LE;
	else
		return -EINVAL;

	return 0;
}

int get_endian(struct task_struct *tsk, unsigned long adr)
{
	struct pt_regs *regs = tsk->thread.regs;
	unsigned int val;

	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
	    !cpu_has_feature(CPU_FTR_REAL_LE))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (regs->msr & MSR_LE) {
		if (cpu_has_feature(CPU_FTR_REAL_LE))
			val = PR_ENDIAN_LITTLE;
		else
			val = PR_ENDIAN_PPC_LITTLE;
	} else
		val = PR_ENDIAN_BIG;

	return put_user(val, (unsigned int __user *)adr);
}
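
/*
 * These back the PR_SET_ENDIAN/PR_GET_ENDIAN prctl(2) pair; e.g. a
 * process on hardware with true little-endian support could request it
 * with (an illustrative call, not code from this file):
 *
 *	prctl(PR_SET_ENDIAN, PR_ENDIAN_LITTLE);
 */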

int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	tsk->thread.align_ctl = val;
	return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}

#define TRUNC_PTR(x)	((typeof(x))(((unsigned long)(x)) & 0xffffffff))

int sys_clone(unsigned long clone_flags, unsigned long usp,
	      int __user *parent_tidp, void __user *child_threadptr,
	      int __user *child_tidp, int p6,
	      struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	if (usp == 0)
		usp = regs->gpr[1];	/* stack pointer for child */
#ifdef CONFIG_PPC64
	if (test_thread_flag(TIF_32BIT)) {
		parent_tidp = TRUNC_PTR(parent_tidp);
		child_tidp = TRUNC_PTR(child_tidp);
	}
#endif
	return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
}

int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
	     unsigned long p4, unsigned long p5, unsigned long p6,
	     struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
}

int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
	      unsigned long p4, unsigned long p5, unsigned long p6,
	      struct pt_regs *regs)
{
	CHECK_FULL_REGS(regs);
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1],
			regs, 0, NULL, NULL);
}

int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
	       unsigned long a3, unsigned long a4, unsigned long a5,
	       struct pt_regs *regs)
{
	int error;
	char *filename;

	filename = getname((char __user *) a0);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_spe_to_thread(current);
	error = do_execve(filename, (char __user * __user *) a1,
			  (char __user * __user *) a2, regs);
	putname(filename);
out:
	return error;
}

#ifdef CONFIG_IRQSTACKS
static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
				  unsigned long nbytes)
{
	unsigned long stack_page;
	unsigned long cpu = task_cpu(p);

	/*
	 * Avoid crashing if the stack has overflowed and corrupted
	 * task_cpu(p), which is in the thread_info struct.
	 */
	if (cpu < NR_CPUS && cpu_possible(cpu)) {
		stack_page = (unsigned long) hardirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;

		stack_page = (unsigned long) softirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;
	}
	return 0;
}

#else
#define valid_irq_stack(sp, p, nb)	0
#endif /* CONFIG_IRQSTACKS */

int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes)
{
	unsigned long stack_page = (unsigned long)task_stack_page(p);

	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	return valid_irq_stack(sp, p, nbytes);
}

EXPORT_SYMBOL(validate_sp);

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.ksp;
	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
		return 0;

	do {
		sp = *(unsigned long *)sp;
		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
			return 0;
		if (count > 0) {
			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
			if (!in_sched_functions(ip))
				return ip;
		}
	} while (count++ < 16);
	return 0;
}
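
/*
 * get_wchan() walks the sleeping task's stack for the first return
 * address outside the scheduler; this is what surfaces in places such
 * as /proc/<pid>/wchan (a descriptive note, not code from this file).
 */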

static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

void show_stack(struct task_struct *tsk, unsigned long *stack)
{
	unsigned long sp, ip, lr, newsp;
	int count = 0;
	int firstframe = 1;

	sp = (unsigned long) stack;
	if (tsk == NULL)
		tsk = current;
	if (sp == 0) {
		if (tsk == current)
			asm("mr %0,1" : "=r" (sp));
		else
			sp = tsk->thread.ksp;
	}

	lr = 0;
	printk("Call Trace:\n");
	do {
		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			return;

		stack = (unsigned long *) sp;
		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!firstframe || ip != lr) {
			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
			if (firstframe)
				printk(" (unreliable)");
			printk("\n");
		}
		firstframe = 0;

		/*
		 * See if this is an exception frame.
		 * We look for the "regshere" marker in the current frame.
		 */
		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			struct pt_regs *regs = (struct pt_regs *)
				(sp + STACK_FRAME_OVERHEAD);
			lr = regs->link;
			printk("--- Exception: %lx at %pS\n    LR = %pS\n",
			       regs->trap, (void *)regs->nip, (void *)lr);
			firstframe = 1;
		}

		sp = newsp;
	} while (count++ < kstack_depth_to_print);
}

void dump_stack(void)
{
	show_stack(current, NULL);
}
EXPORT_SYMBOL(dump_stack);

#ifdef CONFIG_PPC64
void ppc64_runlatch_on(void)
{
	unsigned long ctrl;

	if (cpu_has_feature(CPU_FTR_CTRL) && !test_thread_flag(TIF_RUNLATCH)) {
		HMT_medium();

		ctrl = mfspr(SPRN_CTRLF);
		ctrl |= CTRL_RUNLATCH;
		mtspr(SPRN_CTRLT, ctrl);

		set_thread_flag(TIF_RUNLATCH);
	}
}

void ppc64_runlatch_off(void)
{
	unsigned long ctrl;

	if (cpu_has_feature(CPU_FTR_CTRL) && test_thread_flag(TIF_RUNLATCH)) {
		HMT_medium();

		clear_thread_flag(TIF_RUNLATCH);

		ctrl = mfspr(SPRN_CTRLF);
		ctrl &= ~CTRL_RUNLATCH;
		mtspr(SPRN_CTRLT, ctrl);
	}
}
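
/*
 * A typical pairing, as an idle loop might use it (an illustrative
 * sketch, not code from this file): drop the runlatch while idle so
 * the hardware/hypervisor can deprioritize the thread, and raise it
 * again before doing real work.
 *
 *	ppc64_runlatch_off();
 *	while (!need_resched())
 *		cpu_relax();
 *	ppc64_runlatch_on();
 */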
#endif

#if THREAD_SHIFT < PAGE_SHIFT

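/*
 * When THREAD_SIZE is smaller than a page (e.g. 16k kernel stacks on a
 * 64k-page configuration -- an illustrative case, not asserted by this
 * file), allocating whole pages would waste most of each page, so the
 * stacks are carved out of a dedicated slab cache instead.
 */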
static struct kmem_cache *thread_info_cache;

struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
	struct thread_info *ti;

	ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
	if (unlikely(ti == NULL))
		return NULL;
#ifdef CONFIG_DEBUG_STACK_USAGE
	memset(ti, 0, THREAD_SIZE);
#endif
	return ti;
}

void free_thread_info(struct thread_info *ti)
{
	kmem_cache_free(thread_info_cache, ti);
}

void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, 0, NULL);
	BUG_ON(thread_info_cache == NULL);
}

#endif /* THREAD_SHIFT < PAGE_SHIFT */