1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Derived from "arch/i386/kernel/process.c"
4 * Copyright (C) 1995 Linus Torvalds
5 *
6 * Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
7 * Paul Mackerras (paulus@cs.anu.edu.au)
8 *
9 * PowerPC version
10 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
11 */
12
13 #include <linux/errno.h>
14 #include <linux/sched.h>
15 #include <linux/sched/debug.h>
16 #include <linux/sched/task.h>
17 #include <linux/sched/task_stack.h>
18 #include <linux/kernel.h>
19 #include <linux/mm.h>
20 #include <linux/smp.h>
21 #include <linux/stddef.h>
22 #include <linux/unistd.h>
23 #include <linux/ptrace.h>
24 #include <linux/slab.h>
25 #include <linux/user.h>
26 #include <linux/elf.h>
27 #include <linux/prctl.h>
28 #include <linux/init_task.h>
29 #include <linux/export.h>
30 #include <linux/kallsyms.h>
31 #include <linux/mqueue.h>
32 #include <linux/hardirq.h>
33 #include <linux/utsname.h>
34 #include <linux/ftrace.h>
35 #include <linux/kernel_stat.h>
36 #include <linux/personality.h>
37 #include <linux/random.h>
38 #include <linux/hw_breakpoint.h>
39 #include <linux/uaccess.h>
40 #include <linux/elf-randomize.h>
41 #include <linux/pkeys.h>
42 #include <linux/seq_buf.h>
43
44 #include <asm/pgtable.h>
45 #include <asm/io.h>
46 #include <asm/processor.h>
47 #include <asm/mmu.h>
48 #include <asm/prom.h>
49 #include <asm/machdep.h>
50 #include <asm/time.h>
51 #include <asm/runlatch.h>
52 #include <asm/syscalls.h>
53 #include <asm/switch_to.h>
54 #include <asm/tm.h>
55 #include <asm/debug.h>
56 #ifdef CONFIG_PPC64
57 #include <asm/firmware.h>
58 #include <asm/hw_irq.h>
59 #endif
60 #include <asm/code-patching.h>
61 #include <asm/exec.h>
62 #include <asm/livepatch.h>
63 #include <asm/cpu_has_feature.h>
64 #include <asm/asm-prototypes.h>
65 #include <asm/stacktrace.h>
66 #include <asm/hw_breakpoint.h>
67
68 #include <linux/kprobes.h>
69 #include <linux/kdebug.h>
70
71 /* Transactional Memory debug */
72 #ifdef TM_DEBUG_SW
73 #define TM_DEBUG(x...) printk(KERN_INFO x)
74 #else
75 #define TM_DEBUG(x...) do { } while(0)
76 #endif
77
78 extern unsigned long _get_SP(void);
79
80 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
81 /*
82 * Are we running in "Suspend disabled" mode? If so we have to block any
83 * sigreturn that would get us into suspended state, and we also warn in some
84 * other paths that we should never reach with suspend disabled.
85 */
86 bool tm_suspend_disabled __ro_after_init = false;
87
88 static void check_if_tm_restore_required(struct task_struct *tsk)
89 {
90 /*
91 * If we are saving the current thread's registers, and the
92 * thread is in a transactional state, set the TIF_RESTORE_TM
93 * bit so that we know to restore the registers before
94 * returning to userspace.
95 */
96 if (tsk == current && tsk->thread.regs &&
97 MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
98 !test_thread_flag(TIF_RESTORE_TM)) {
99 tsk->thread.ckpt_regs.msr = tsk->thread.regs->msr;
100 set_thread_flag(TIF_RESTORE_TM);
101 }
102 }
103
104 #else
105 static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
106 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
107
108 bool strict_msr_control;
109 EXPORT_SYMBOL(strict_msr_control);
110
111 static int __init enable_strict_msr_control(char *str)
112 {
113 strict_msr_control = true;
114 pr_info("Enabling strict facility control\n");
115
116 return 0;
117 }
118 early_param("ppc_strict_facility_enable", enable_strict_msr_control);
119
120 /* notrace because it's called by restore_math */
121 unsigned long notrace msr_check_and_set(unsigned long bits)
122 {
123 unsigned long oldmsr = mfmsr();
124 unsigned long newmsr;
125
126 newmsr = oldmsr | bits;
127
128 #ifdef CONFIG_VSX
129 if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
130 newmsr |= MSR_VSX;
131 #endif
132
133 if (oldmsr != newmsr)
134 mtmsr_isync(newmsr);
135
136 return newmsr;
137 }
138 EXPORT_SYMBOL_GPL(msr_check_and_set);
139
140 /* notrace because it's called by restore_math */
141 void notrace __msr_check_and_clear(unsigned long bits)
142 {
143 unsigned long oldmsr = mfmsr();
144 unsigned long newmsr;
145
146 newmsr = oldmsr & ~bits;
147
148 #ifdef CONFIG_VSX
149 if (cpu_has_feature(CPU_FTR_VSX) && (bits & MSR_FP))
150 newmsr &= ~MSR_VSX;
151 #endif
152
153 if (oldmsr != newmsr)
154 mtmsr_isync(newmsr);
155 }
156 EXPORT_SYMBOL(__msr_check_and_clear);
157
158 #ifdef CONFIG_PPC_FPU
159 static void __giveup_fpu(struct task_struct *tsk)
160 {
161 unsigned long msr;
162
163 save_fpu(tsk);
164 msr = tsk->thread.regs->msr;
165 msr &= ~(MSR_FP|MSR_FE0|MSR_FE1);
166 #ifdef CONFIG_VSX
167 if (cpu_has_feature(CPU_FTR_VSX))
168 msr &= ~MSR_VSX;
169 #endif
170 tsk->thread.regs->msr = msr;
171 }
172
173 void giveup_fpu(struct task_struct *tsk)
174 {
175 check_if_tm_restore_required(tsk);
176
177 msr_check_and_set(MSR_FP);
178 __giveup_fpu(tsk);
179 msr_check_and_clear(MSR_FP);
180 }
181 EXPORT_SYMBOL(giveup_fpu);
182
183 /*
184 * Make sure the floating-point register state in the
185 * thread_struct is up to date for task tsk.
186 */
187 void flush_fp_to_thread(struct task_struct *tsk)
188 {
189 if (tsk->thread.regs) {
190 /*
191 * We need to disable preemption here because if we didn't,
192 * another process could get scheduled after the regs->msr
193 * test but before we have finished saving the FP registers
194 * to the thread_struct. That process could take over the
195 * FPU, and then when we get scheduled again we would store
196 * bogus values for the remaining FP registers.
197 */
198 preempt_disable();
199 if (tsk->thread.regs->msr & MSR_FP) {
200 /*
201 * This should only ever be called for current or
202 * for a stopped child process. Since we save away
203 * the FP register state on context switch,
204 * there is something wrong if a stopped child appears
205 * to still have its FP state in the CPU registers.
206 */
207 BUG_ON(tsk != current);
208 giveup_fpu(tsk);
209 }
210 preempt_enable();
211 }
212 }
213 EXPORT_SYMBOL_GPL(flush_fp_to_thread);
214
215 void enable_kernel_fp(void)
216 {
217 unsigned long cpumsr;
218
219 WARN_ON(preemptible());
220
221 cpumsr = msr_check_and_set(MSR_FP);
222
223 if (current->thread.regs && (current->thread.regs->msr & MSR_FP)) {
224 check_if_tm_restore_required(current);
225 /*
226 * If a thread has already been reclaimed then the
227 * checkpointed registers are on the CPU but have definitely
228 * been saved by the reclaim code. Don't need to and *cannot*
229 * giveup as this would save to the 'live' structure not the
230 * checkpointed structure.
231 */
232 if (!MSR_TM_ACTIVE(cpumsr) &&
233 MSR_TM_ACTIVE(current->thread.regs->msr))
234 return;
235 __giveup_fpu(current);
236 }
237 }
238 EXPORT_SYMBOL(enable_kernel_fp);
239
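/*
 * Load the FP register state from the thread_struct back into the CPU
 * if the thread has been using FP (load_fp is set). Returns 1 so the
 * caller knows to set MSR_FP in the user MSR.
 */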
240 static int restore_fp(struct task_struct *tsk)
241 {
242 if (tsk->thread.load_fp) {
243 load_fp_state(&current->thread.fp_state);
244 current->thread.load_fp++;
245 return 1;
246 }
247 return 0;
248 }
249 #else
250 static int restore_fp(struct task_struct *tsk) { return 0; }
251 #endif /* CONFIG_PPC_FPU */
252
253 #ifdef CONFIG_ALTIVEC
254 #define loadvec(thr) ((thr).load_vec)
255
256 static void __giveup_altivec(struct task_struct *tsk)
257 {
258 unsigned long msr;
259
260 save_altivec(tsk);
261 msr = tsk->thread.regs->msr;
262 msr &= ~MSR_VEC;
263 #ifdef CONFIG_VSX
264 if (cpu_has_feature(CPU_FTR_VSX))
265 msr &= ~MSR_VSX;
266 #endif
267 tsk->thread.regs->msr = msr;
268 }
269
270 void giveup_altivec(struct task_struct *tsk)
271 {
272 check_if_tm_restore_required(tsk);
273
274 msr_check_and_set(MSR_VEC);
275 __giveup_altivec(tsk);
276 msr_check_and_clear(MSR_VEC);
277 }
278 EXPORT_SYMBOL(giveup_altivec);
279
280 void enable_kernel_altivec(void)
281 {
282 unsigned long cpumsr;
283
284 WARN_ON(preemptible());
285
286 cpumsr = msr_check_and_set(MSR_VEC);
287
288 if (current->thread.regs && (current->thread.regs->msr & MSR_VEC)) {
289 check_if_tm_restore_required(current);
290 /*
291 * If a thread has already been reclaimed then the
292 * checkpointed registers are on the CPU but have definitely
293 * been saved by the reclaim code. Don't need to and *cannot*
294 * giveup as this would save to the 'live' structure not the
295 * checkpointed structure.
296 */
297 if (!MSR_TM_ACTIVE(cpumsr) &&
298 MSR_TM_ACTIVE(current->thread.regs->msr))
299 return;
300 __giveup_altivec(current);
301 }
302 }
303 EXPORT_SYMBOL(enable_kernel_altivec);
304
305 /*
306 * Make sure the VMX/Altivec register state in the
307 * thread_struct is up to date for task tsk.
308 */
309 void flush_altivec_to_thread(struct task_struct *tsk)
310 {
311 if (tsk->thread.regs) {
312 preempt_disable();
313 if (tsk->thread.regs->msr & MSR_VEC) {
314 BUG_ON(tsk != current);
315 giveup_altivec(tsk);
316 }
317 preempt_enable();
318 }
319 }
320 EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
321
322 static int restore_altivec(struct task_struct *tsk)
323 {
324 if (cpu_has_feature(CPU_FTR_ALTIVEC) && (tsk->thread.load_vec)) {
325 load_vr_state(&tsk->thread.vr_state);
326 tsk->thread.used_vr = 1;
327 tsk->thread.load_vec++;
328
329 return 1;
330 }
331 return 0;
332 }
333 #else
334 #define loadvec(thr) 0
335 static inline int restore_altivec(struct task_struct *tsk) { return 0; }
336 #endif /* CONFIG_ALTIVEC */
337
338 #ifdef CONFIG_VSX
339 static void __giveup_vsx(struct task_struct *tsk)
340 {
341 unsigned long msr = tsk->thread.regs->msr;
342
343 /*
344 * We should never be setting MSR_VSX without also setting
345 * MSR_FP and MSR_VEC
346 */
347 WARN_ON((msr & MSR_VSX) && !((msr & MSR_FP) && (msr & MSR_VEC)));
348
349 /* __giveup_fpu will clear MSR_VSX */
350 if (msr & MSR_FP)
351 __giveup_fpu(tsk);
352 if (msr & MSR_VEC)
353 __giveup_altivec(tsk);
354 }
355
356 static void giveup_vsx(struct task_struct *tsk)
357 {
358 check_if_tm_restore_required(tsk);
359
360 msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
361 __giveup_vsx(tsk);
362 msr_check_and_clear(MSR_FP|MSR_VEC|MSR_VSX);
363 }
364
365 void enable_kernel_vsx(void)
366 {
367 unsigned long cpumsr;
368
369 WARN_ON(preemptible());
370
371 cpumsr = msr_check_and_set(MSR_FP|MSR_VEC|MSR_VSX);
372
373 if (current->thread.regs &&
374 (current->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP))) {
375 check_if_tm_restore_required(current);
376 /*
377 * If a thread has already been reclaimed then the
378 * checkpointed registers are on the CPU but have definitely
379 * been saved by the reclaim code. Don't need to and *cannot*
380 * giveup as this would save to the 'live' structure not the
381 * checkpointed structure.
382 */
383 if (!MSR_TM_ACTIVE(cpumsr) &&
384 MSR_TM_ACTIVE(current->thread.regs->msr))
385 return;
386 __giveup_vsx(current);
387 }
388 }
389 EXPORT_SYMBOL(enable_kernel_vsx);
390
391 void flush_vsx_to_thread(struct task_struct *tsk)
392 {
393 if (tsk->thread.regs) {
394 preempt_disable();
395 if (tsk->thread.regs->msr & (MSR_VSX|MSR_VEC|MSR_FP)) {
396 BUG_ON(tsk != current);
397 giveup_vsx(tsk);
398 }
399 preempt_enable();
400 }
401 }
402 EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
403
404 static int restore_vsx(struct task_struct *tsk)
405 {
406 if (cpu_has_feature(CPU_FTR_VSX)) {
407 tsk->thread.used_vsr = 1;
408 return 1;
409 }
410
411 return 0;
412 }
413 #else
414 static inline int restore_vsx(struct task_struct *tsk) { return 0; }
415 #endif /* CONFIG_VSX */
416
417 #ifdef CONFIG_SPE
418 void giveup_spe(struct task_struct *tsk)
419 {
420 check_if_tm_restore_required(tsk);
421
422 msr_check_and_set(MSR_SPE);
423 __giveup_spe(tsk);
424 msr_check_and_clear(MSR_SPE);
425 }
426 EXPORT_SYMBOL(giveup_spe);
427
428 void enable_kernel_spe(void)
429 {
430 WARN_ON(preemptible());
431
432 msr_check_and_set(MSR_SPE);
433
434 if (current->thread.regs && (current->thread.regs->msr & MSR_SPE)) {
435 check_if_tm_restore_required(current);
436 __giveup_spe(current);
437 }
438 }
439 EXPORT_SYMBOL(enable_kernel_spe);
440
441 void flush_spe_to_thread(struct task_struct *tsk)
442 {
443 if (tsk->thread.regs) {
444 preempt_disable();
445 if (tsk->thread.regs->msr & MSR_SPE) {
446 BUG_ON(tsk != current);
447 tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
448 giveup_spe(tsk);
449 }
450 preempt_enable();
451 }
452 }
453 #endif /* CONFIG_SPE */
454
455 static unsigned long msr_all_available;
456
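/*
 * Work out which math facility MSR bits (FP, VEC, VSX, SPE) exist on
 * this CPU so that giveup_all()/save_all()/restore_math() only touch
 * facilities that are actually present.
 */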
457 static int __init init_msr_all_available(void)
458 {
459 #ifdef CONFIG_PPC_FPU
460 msr_all_available |= MSR_FP;
461 #endif
462 #ifdef CONFIG_ALTIVEC
463 if (cpu_has_feature(CPU_FTR_ALTIVEC))
464 msr_all_available |= MSR_VEC;
465 #endif
466 #ifdef CONFIG_VSX
467 if (cpu_has_feature(CPU_FTR_VSX))
468 msr_all_available |= MSR_VSX;
469 #endif
470 #ifdef CONFIG_SPE
471 if (cpu_has_feature(CPU_FTR_SPE))
472 msr_all_available |= MSR_SPE;
473 #endif
474
475 return 0;
476 }
477 early_initcall(init_msr_all_available);
478
479 void giveup_all(struct task_struct *tsk)
480 {
481 unsigned long usermsr;
482
483 if (!tsk->thread.regs)
484 return;
485
486 check_if_tm_restore_required(tsk);
487
488 usermsr = tsk->thread.regs->msr;
489
490 if ((usermsr & msr_all_available) == 0)
491 return;
492
493 msr_check_and_set(msr_all_available);
494
495 WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
496
497 #ifdef CONFIG_PPC_FPU
498 if (usermsr & MSR_FP)
499 __giveup_fpu(tsk);
500 #endif
501 #ifdef CONFIG_ALTIVEC
502 if (usermsr & MSR_VEC)
503 __giveup_altivec(tsk);
504 #endif
505 #ifdef CONFIG_SPE
506 if (usermsr & MSR_SPE)
507 __giveup_spe(tsk);
508 #endif
509
510 msr_check_and_clear(msr_all_available);
511 }
512 EXPORT_SYMBOL(giveup_all);
513
514 /*
515 * The exception exit path calls restore_math() with interrupts hard disabled
516 * but the soft irq state not "reconciled". ftrace code that calls
517 * local_irq_save/restore causes warnings.
518 *
519 * Rather than complicate the exit path, just don't trace restore_math. This
520 * could be done by having ftrace entry code check for this un-reconciled
521 * condition where MSR[EE]=0 and PACA_IRQ_HARD_DIS is not set, and
522 * temporarily fix it up for the duration of the ftrace call.
523 */
524 void notrace restore_math(struct pt_regs *regs)
525 {
526 unsigned long msr;
527
528 if (!MSR_TM_ACTIVE(regs->msr) &&
529 !current->thread.load_fp && !loadvec(current->thread))
530 return;
531
532 msr = regs->msr;
533 msr_check_and_set(msr_all_available);
534
535 /*
536 * Only reload if the bit is not set in the user MSR; the bit being set
537 * indicates that the registers are hot.
538 */
539 if ((!(msr & MSR_FP)) && restore_fp(current))
540 msr |= MSR_FP | current->thread.fpexc_mode;
541
542 if ((!(msr & MSR_VEC)) && restore_altivec(current))
543 msr |= MSR_VEC;
544
545 if ((msr & (MSR_FP | MSR_VEC)) == (MSR_FP | MSR_VEC) &&
546 restore_vsx(current)) {
547 msr |= MSR_VSX;
548 }
549
550 msr_check_and_clear(msr_all_available);
551
552 regs->msr = msr;
553 }
554
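/*
 * Save any live FP/Altivec/SPE register state for tsk into its
 * thread_struct. Unlike giveup_all(), FP/Altivec state is saved with
 * save_fpu()/save_altivec(), which do not clear the facility bits in
 * the user MSR.
 */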
555 static void save_all(struct task_struct *tsk)
556 {
557 unsigned long usermsr;
558
559 if (!tsk->thread.regs)
560 return;
561
562 usermsr = tsk->thread.regs->msr;
563
564 if ((usermsr & msr_all_available) == 0)
565 return;
566
567 msr_check_and_set(msr_all_available);
568
569 WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
570
571 if (usermsr & MSR_FP)
572 save_fpu(tsk);
573
574 if (usermsr & MSR_VEC)
575 save_altivec(tsk);
576
577 if (usermsr & MSR_SPE)
578 __giveup_spe(tsk);
579
580 msr_check_and_clear(msr_all_available);
581 thread_pkey_regs_save(&tsk->thread);
582 }
583
584 void flush_all_to_thread(struct task_struct *tsk)
585 {
586 if (tsk->thread.regs) {
587 preempt_disable();
588 BUG_ON(tsk != current);
589 #ifdef CONFIG_SPE
590 if (tsk->thread.regs->msr & MSR_SPE)
591 tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
592 #endif
593 save_all(tsk);
594
595 preempt_enable();
596 }
597 }
598 EXPORT_SYMBOL(flush_all_to_thread);
599
600 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
601 void do_send_trap(struct pt_regs *regs, unsigned long address,
602 unsigned long error_code, int breakpt)
603 {
604 current->thread.trap_nr = TRAP_HWBKPT;
605 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
606 11, SIGSEGV) == NOTIFY_STOP)
607 return;
608
609 /* Deliver the signal to userspace */
610 force_sig_ptrace_errno_trap(breakpt, /* breakpoint or watchpoint id */
611 (void __user *)address);
612 }
613 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
614 void do_break(struct pt_regs *regs, unsigned long address,
615 unsigned long error_code)
616 {
617 current->thread.trap_nr = TRAP_HWBKPT;
618 if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
619 11, SIGSEGV) == NOTIFY_STOP)
620 return;
621
622 if (debugger_break_match(regs))
623 return;
624
625 /* Clear the breakpoint */
626 hw_breakpoint_disable();
627
628 /* Deliver the signal to userspace */
629 force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void __user *)address);
630 }
631 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
632
633 static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);
634
635 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
636 /*
637 * Set the debug registers back to their default "safe" values.
638 */
639 static void set_debug_reg_defaults(struct thread_struct *thread)
640 {
641 thread->debug.iac1 = thread->debug.iac2 = 0;
642 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
643 thread->debug.iac3 = thread->debug.iac4 = 0;
644 #endif
645 thread->debug.dac1 = thread->debug.dac2 = 0;
646 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
647 thread->debug.dvc1 = thread->debug.dvc2 = 0;
648 #endif
649 thread->debug.dbcr0 = 0;
650 #ifdef CONFIG_BOOKE
651 /*
652 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
653 */
654 thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
655 DBCR1_IAC3US | DBCR1_IAC4US;
656 /*
657 * Force Data Address Compare User/Supervisor bits to be User-only
658 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
659 */
660 thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
661 #else
662 thread->debug.dbcr1 = 0;
663 #endif
664 }
665
666 static void prime_debug_regs(struct debug_reg *debug)
667 {
668 /*
669 * We could have inherited MSR_DE from userspace, since
670 * it doesn't get cleared on exception entry. Make sure
671 * MSR_DE is clear before we enable any debug events.
672 */
673 mtmsr(mfmsr() & ~MSR_DE);
674
675 mtspr(SPRN_IAC1, debug->iac1);
676 mtspr(SPRN_IAC2, debug->iac2);
677 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
678 mtspr(SPRN_IAC3, debug->iac3);
679 mtspr(SPRN_IAC4, debug->iac4);
680 #endif
681 mtspr(SPRN_DAC1, debug->dac1);
682 mtspr(SPRN_DAC2, debug->dac2);
683 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
684 mtspr(SPRN_DVC1, debug->dvc1);
685 mtspr(SPRN_DVC2, debug->dvc2);
686 #endif
687 mtspr(SPRN_DBCR0, debug->dbcr0);
688 mtspr(SPRN_DBCR1, debug->dbcr1);
689 #ifdef CONFIG_BOOKE
690 mtspr(SPRN_DBCR2, debug->dbcr2);
691 #endif
692 }
693 /*
694 * If either the old or the new thread is making use of the
695 * debug registers, set the debug registers from the values
696 * stored in the new thread.
697 */
698 void switch_booke_debug_regs(struct debug_reg *new_debug)
699 {
700 if ((current->thread.debug.dbcr0 & DBCR0_IDM)
701 || (new_debug->dbcr0 & DBCR0_IDM))
702 prime_debug_regs(new_debug);
703 }
704 EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
705 #else /* !CONFIG_PPC_ADV_DEBUG_REGS */
706 #ifndef CONFIG_HAVE_HW_BREAKPOINT
707 static void set_breakpoint(struct arch_hw_breakpoint *brk)
708 {
709 preempt_disable();
710 __set_breakpoint(brk);
711 preempt_enable();
712 }
713
714 static void set_debug_reg_defaults(struct thread_struct *thread)
715 {
716 thread->hw_brk.address = 0;
717 thread->hw_brk.type = 0;
718 if (ppc_breakpoint_available())
719 set_breakpoint(&thread->hw_brk);
720 }
721 #endif /* !CONFIG_HAVE_HW_BREAKPOINT */
722 #endif /* CONFIG_PPC_ADV_DEBUG_REGS */
723
724 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
725 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
726 {
727 mtspr(SPRN_DAC1, dabr);
728 #ifdef CONFIG_PPC_47x
729 isync();
730 #endif
731 return 0;
732 }
733 #elif defined(CONFIG_PPC_BOOK3S)
734 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
735 {
736 mtspr(SPRN_DABR, dabr);
737 if (cpu_has_feature(CPU_FTR_DABRX))
738 mtspr(SPRN_DABRX, dabrx);
739 return 0;
740 }
741 #elif defined(CONFIG_PPC_8xx)
742 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
743 {
744 unsigned long addr = dabr & ~HW_BRK_TYPE_DABR;
745 unsigned long lctrl1 = 0x90000000; /* compare type: equal on E & F */
746 unsigned long lctrl2 = 0x8e000002; /* watchpoint 1 on cmp E | F */
747
748 if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_READ)
749 lctrl1 |= 0xa0000;
750 else if ((dabr & HW_BRK_TYPE_RDWR) == HW_BRK_TYPE_WRITE)
751 lctrl1 |= 0xf0000;
752 else if ((dabr & HW_BRK_TYPE_RDWR) == 0)
753 lctrl2 = 0;
754
755 mtspr(SPRN_LCTRL2, 0);
756 mtspr(SPRN_CMPE, addr);
757 mtspr(SPRN_CMPF, addr + 4);
758 mtspr(SPRN_LCTRL1, lctrl1);
759 mtspr(SPRN_LCTRL2, lctrl2);
760
761 return 0;
762 }
763 #else
764 static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
765 {
766 return -EINVAL;
767 }
768 #endif
769
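/* Program a DABR-style breakpoint, using the platform hook if one is provided. */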
770 static inline int set_dabr(struct arch_hw_breakpoint *brk)
771 {
772 unsigned long dabr, dabrx;
773
774 dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
775 dabrx = ((brk->type >> 3) & 0x7);
776
777 if (ppc_md.set_dabr)
778 return ppc_md.set_dabr(dabr, dabrx);
779
780 return __set_dabr(dabr, dabrx);
781 }
782
783 void __set_breakpoint(struct arch_hw_breakpoint *brk)
784 {
785 memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));
786
787 if (dawr_enabled())
788 // Power8 or later
789 set_dawr(brk);
790 else if (!cpu_has_feature(CPU_FTR_ARCH_207S))
791 // Power7 or earlier
792 set_dabr(brk);
793 else
794 // Shouldn't happen due to higher level checks
795 WARN_ON_ONCE(1);
796 }
797
798 /* Check if we have DAWR or DABR hardware */
799 bool ppc_breakpoint_available(void)
800 {
801 if (dawr_enabled())
802 return true; /* POWER8 DAWR or POWER9 forced DAWR */
803 if (cpu_has_feature(CPU_FTR_ARCH_207S))
804 return false; /* POWER9 with DAWR disabled */
805 /* DABR: Everything but POWER8 and POWER9 */
806 return true;
807 }
808 EXPORT_SYMBOL_GPL(ppc_breakpoint_available);
809
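/*
 * Compare two breakpoint descriptors. Used at context switch to avoid
 * rewriting the breakpoint registers when the new task's breakpoint is
 * identical to the one already installed.
 */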
810 static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
811 struct arch_hw_breakpoint *b)
812 {
813 if (a->address != b->address)
814 return false;
815 if (a->type != b->type)
816 return false;
817 if (a->len != b->len)
818 return false;
819 return true;
820 }
821
822 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
823
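/* True if the task has user registers and MSR[TM] is set in its user MSR. */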
824 static inline bool tm_enabled(struct task_struct *tsk)
825 {
826 return tsk && tsk->thread.regs && (tsk->thread.regs->msr & MSR_TM);
827 }
828
829 static void tm_reclaim_thread(struct thread_struct *thr, uint8_t cause)
830 {
831 /*
832 * Use the current MSR TM suspended bit to track if we have
833 * checkpointed state outstanding.
834 * On signal delivery, we'd normally reclaim the checkpointed
835 * state to obtain stack pointer (see:get_tm_stackpointer()).
836 * This will then directly return to userspace without going
837 * through __switch_to(). However, if the stack frame is bad,
838 * we need to exit this thread which calls __switch_to() which
839 * will again attempt to reclaim the already saved tm state.
840 * Hence we need to check that we've not already reclaimed
841 * this state.
842 * We do this using the current MSR, rather than tracking it in
843 * some specific thread_struct bit, as it has the additional
844 * benefit of checking for a potential TM bad thing exception.
845 */
846 if (!MSR_TM_SUSPENDED(mfmsr()))
847 return;
848
849 giveup_all(container_of(thr, struct task_struct, thread));
850
851 tm_reclaim(thr, cause);
852
853 /*
854 * If we are in a transaction and FP is off then we can't have
855 * used FP inside that transaction. Hence the checkpointed
856 * state is the same as the live state. We need to copy the
857 * live state to the checkpointed state so that when the
858 * transaction is restored, the checkpointed state is correct
859 * and the aborted transaction sees the correct state. We use
860 * ckpt_regs.msr here as that's what tm_reclaim will use to
861 * determine if it's going to write the checkpointed state or
862 * not. So either this will write the checkpointed registers,
863 * or reclaim will. Similarly for VMX.
864 */
865 if ((thr->ckpt_regs.msr & MSR_FP) == 0)
866 memcpy(&thr->ckfp_state, &thr->fp_state,
867 sizeof(struct thread_fp_state));
868 if ((thr->ckpt_regs.msr & MSR_VEC) == 0)
869 memcpy(&thr->ckvr_state, &thr->vr_state,
870 sizeof(struct thread_vr_state));
871 }
872
873 void tm_reclaim_current(uint8_t cause)
874 {
875 tm_enable();
876 tm_reclaim_thread(&current->thread, cause);
877 }
878
879 static inline void tm_reclaim_task(struct task_struct *tsk)
880 {
881 /* We have to work out if we're switching from/to a task that's in the
882 * middle of a transaction.
883 *
884 * In switching we need to maintain a 2nd register state as
885 * oldtask->thread.ckpt_regs. We tm_reclaim(oldproc); this saves the
886 * checkpointed (tbegin) state in ckpt_regs, ckfp_state and
887 * ckvr_state
888 *
889 * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
890 */
891 struct thread_struct *thr = &tsk->thread;
892
893 if (!thr->regs)
894 return;
895
896 if (!MSR_TM_ACTIVE(thr->regs->msr))
897 goto out_and_saveregs;
898
899 WARN_ON(tm_suspend_disabled);
900
901 TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
902 "ccr=%lx, msr=%lx, trap=%lx)\n",
903 tsk->pid, thr->regs->nip,
904 thr->regs->ccr, thr->regs->msr,
905 thr->regs->trap);
906
907 tm_reclaim_thread(thr, TM_CAUSE_RESCHED);
908
909 TM_DEBUG("--- tm_reclaim on pid %d complete\n",
910 tsk->pid);
911
912 out_and_saveregs:
913 /* Always save the regs here, even if a transaction's not active.
914 * This context-switches a thread's TM info SPRs. We do it here to
915 * be consistent with the restore path (in recheckpoint) which
916 * cannot happen later in _switch().
917 */
918 tm_save_sprs(thr);
919 }
920
921 extern void __tm_recheckpoint(struct thread_struct *thread);
922
923 void tm_recheckpoint(struct thread_struct *thread)
924 {
925 unsigned long flags;
926
927 if (!(thread->regs->msr & MSR_TM))
928 return;
929
930 /* We really can't be interrupted here as the TEXASR registers can't
931 * change and later in the trecheckpoint code, we have a userspace R1.
932 * So let's hard disable over this region.
933 */
934 local_irq_save(flags);
935 hard_irq_disable();
936
937 /* The TM SPRs are restored here, so that TEXASR.FS can be set
938 * before the trecheckpoint and no explosion occurs.
939 */
940 tm_restore_sprs(thread);
941
942 __tm_recheckpoint(thread);
943
944 local_irq_restore(flags);
945 }
946
947 static inline void tm_recheckpoint_new_task(struct task_struct *new)
948 {
949 if (!cpu_has_feature(CPU_FTR_TM))
950 return;
951
952 /* Recheckpoint the registers of the thread we're about to switch to.
953 *
954 * If the task was using FP, we non-lazily reload both the original and
955 * the speculative FP register states. This is because the kernel
956 * doesn't see if/when a TM rollback occurs, so if we take an FP
957 * unavailable later, we are unable to determine which set of FP regs
958 * need to be restored.
959 */
960 if (!tm_enabled(new))
961 return;
962
963 if (!MSR_TM_ACTIVE(new->thread.regs->msr)){
964 tm_restore_sprs(&new->thread);
965 return;
966 }
967 /* Recheckpoint to restore original checkpointed register state. */
968 TM_DEBUG("*** tm_recheckpoint of pid %d (new->msr 0x%lx)\n",
969 new->pid, new->thread.regs->msr);
970
971 tm_recheckpoint(&new->thread);
972
973 /*
974 * The checkpointed state has been restored but the live state has
975 * not, ensure all the math functionality is turned off to trigger
976 * restore_math() to reload.
977 */
978 new->thread.regs->msr &= ~(MSR_FP | MSR_VEC | MSR_VSX);
979
980 TM_DEBUG("*** tm_recheckpoint of pid %d complete "
981 "(kernel msr 0x%lx)\n",
982 new->pid, mfmsr());
983 }
984
985 static inline void __switch_to_tm(struct task_struct *prev,
986 struct task_struct *new)
987 {
988 if (cpu_has_feature(CPU_FTR_TM)) {
989 if (tm_enabled(prev) || tm_enabled(new))
990 tm_enable();
991
992 if (tm_enabled(prev)) {
993 prev->thread.load_tm++;
994 tm_reclaim_task(prev);
995 if (!MSR_TM_ACTIVE(prev->thread.regs->msr) && prev->thread.load_tm == 0)
996 prev->thread.regs->msr &= ~MSR_TM;
997 }
998
999 tm_recheckpoint_new_task(new);
1000 }
1001 }
1002
1003 /*
1004 * This is called if we are on the way out to userspace and the
1005 * TIF_RESTORE_TM flag is set. It checks if we need to reload
1006 * FP and/or vector state and does so if necessary.
1007 * If userspace is inside a transaction (whether active or
1008 * suspended) and FP/VMX/VSX instructions have ever been enabled
1009 * inside that transaction, then we have to keep them enabled
1010 * and keep the FP/VMX/VSX state loaded for as long as the transaction
1011 * continues. The reason is that if we didn't, and subsequently
1012 * got a FP/VMX/VSX unavailable interrupt inside a transaction,
1013 * we don't know whether it's the same transaction, and thus we
1014 * don't know which of the checkpointed state and the transactional
1015 * state to use.
1016 */
1017 void restore_tm_state(struct pt_regs *regs)
1018 {
1019 unsigned long msr_diff;
1020
1021 /*
1022 * This is the only moment we should clear TIF_RESTORE_TM as
1023 * it is here that ckpt_regs.msr and pt_regs.msr become the same
1024 * again; anything else could lead to an incorrect ckpt_msr being
1025 * saved and therefore incorrect signal contexts.
1026 */
1027 clear_thread_flag(TIF_RESTORE_TM);
1028 if (!MSR_TM_ACTIVE(regs->msr))
1029 return;
1030
1031 msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
1032 msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
1033
1034 /* Ensure that restore_math() will restore */
1035 if (msr_diff & MSR_FP)
1036 current->thread.load_fp = 1;
1037 #ifdef CONFIG_ALTIVEC
1038 if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
1039 current->thread.load_vec = 1;
1040 #endif
1041 restore_math(regs);
1042
1043 regs->msr |= msr_diff;
1044 }
1045
1046 #else
1047 #define tm_recheckpoint_new_task(new)
1048 #define __switch_to_tm(prev, new)
1049 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1050
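/*
 * Save the SPRs that are context-switched in software (VRSAVE, DSCR,
 * EBB registers, FSCR, TAR on Book3S-64, protection-key registers)
 * into the thread_struct.
 */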
1051 static inline void save_sprs(struct thread_struct *t)
1052 {
1053 #ifdef CONFIG_ALTIVEC
1054 if (cpu_has_feature(CPU_FTR_ALTIVEC))
1055 t->vrsave = mfspr(SPRN_VRSAVE);
1056 #endif
1057 #ifdef CONFIG_PPC_BOOK3S_64
1058 if (cpu_has_feature(CPU_FTR_DSCR))
1059 t->dscr = mfspr(SPRN_DSCR);
1060
1061 if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
1062 t->bescr = mfspr(SPRN_BESCR);
1063 t->ebbhr = mfspr(SPRN_EBBHR);
1064 t->ebbrr = mfspr(SPRN_EBBRR);
1065
1066 t->fscr = mfspr(SPRN_FSCR);
1067
1068 /*
1069 * Note that the TAR is not available for use in the kernel.
1070 * (To provide this, the TAR should be backed up/restored on
1071 * exception entry/exit instead, and be in pt_regs. FIXME,
1072 * this should be in pt_regs anyway (for debug).)
1073 */
1074 t->tar = mfspr(SPRN_TAR);
1075 }
1076 #endif
1077
1078 thread_pkey_regs_save(t);
1079 }
1080
1081 static inline void restore_sprs(struct thread_struct *old_thread,
1082 struct thread_struct *new_thread)
1083 {
1084 #ifdef CONFIG_ALTIVEC
1085 if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
1086 old_thread->vrsave != new_thread->vrsave)
1087 mtspr(SPRN_VRSAVE, new_thread->vrsave);
1088 #endif
1089 #ifdef CONFIG_PPC_BOOK3S_64
1090 if (cpu_has_feature(CPU_FTR_DSCR)) {
1091 u64 dscr = get_paca()->dscr_default;
1092 if (new_thread->dscr_inherit)
1093 dscr = new_thread->dscr;
1094
1095 if (old_thread->dscr != dscr)
1096 mtspr(SPRN_DSCR, dscr);
1097 }
1098
1099 if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
1100 if (old_thread->bescr != new_thread->bescr)
1101 mtspr(SPRN_BESCR, new_thread->bescr);
1102 if (old_thread->ebbhr != new_thread->ebbhr)
1103 mtspr(SPRN_EBBHR, new_thread->ebbhr);
1104 if (old_thread->ebbrr != new_thread->ebbrr)
1105 mtspr(SPRN_EBBRR, new_thread->ebbrr);
1106
1107 if (old_thread->fscr != new_thread->fscr)
1108 mtspr(SPRN_FSCR, new_thread->fscr);
1109
1110 if (old_thread->tar != new_thread->tar)
1111 mtspr(SPRN_TAR, new_thread->tar);
1112 }
1113
1114 if (cpu_has_feature(CPU_FTR_P9_TIDR) &&
1115 old_thread->tidr != new_thread->tidr)
1116 mtspr(SPRN_TIDR, new_thread->tidr);
1117 #endif
1118
1119 thread_pkey_regs_restore(new_thread, old_thread);
1120 }
1121
1122 struct task_struct *__switch_to(struct task_struct *prev,
1123 struct task_struct *new)
1124 {
1125 struct thread_struct *new_thread, *old_thread;
1126 struct task_struct *last;
1127 #ifdef CONFIG_PPC_BOOK3S_64
1128 struct ppc64_tlb_batch *batch;
1129 #endif
1130
1131 new_thread = &new->thread;
1132 old_thread = &current->thread;
1133
1134 WARN_ON(!irqs_disabled());
1135
1136 #ifdef CONFIG_PPC_BOOK3S_64
1137 batch = this_cpu_ptr(&ppc64_tlb_batch);
1138 if (batch->active) {
1139 current_thread_info()->local_flags |= _TLF_LAZY_MMU;
1140 if (batch->index)
1141 __flush_tlb_pending(batch);
1142 batch->active = 0;
1143 }
1144 #endif /* CONFIG_PPC_BOOK3S_64 */
1145
1146 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
1147 switch_booke_debug_regs(&new->thread.debug);
1148 #else
1149 /*
1150 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
1151 * schedule DABR
1152 */
1153 #ifndef CONFIG_HAVE_HW_BREAKPOINT
1154 if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
1155 __set_breakpoint(&new->thread.hw_brk);
1156 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
1157 #endif
1158
1159 /*
1160 * We need to save SPRs before treclaim/trecheckpoint as these will
1161 * change a number of them.
1162 */
1163 save_sprs(&prev->thread);
1164
1165 /* Save FPU, Altivec, VSX and SPE state */
1166 giveup_all(prev);
1167
1168 __switch_to_tm(prev, new);
1169
1170 if (!radix_enabled()) {
1171 /*
1172 * We can't take a PMU exception inside _switch() since there
1173 * is a window where the kernel stack SLB and the kernel stack
1174 * are out of sync. Hard disable here.
1175 */
1176 hard_irq_disable();
1177 }
1178
1179 /*
1180 * Call restore_sprs() before calling _switch(). If we move it after
1181 * _switch() then we miss out on calling it for new tasks. The reason
1182 * for this is we manually create a stack frame for new tasks that
1183 * directly returns through ret_from_fork() or
1184 * ret_from_kernel_thread(). See copy_thread() for details.
1185 */
1186 restore_sprs(old_thread, new_thread);
1187
1188 last = _switch(old_thread, new_thread);
1189
1190 #ifdef CONFIG_PPC_BOOK3S_64
1191 if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
1192 current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
1193 batch = this_cpu_ptr(&ppc64_tlb_batch);
1194 batch->active = 1;
1195 }
1196
1197 if (current->thread.regs) {
1198 restore_math(current->thread.regs);
1199
1200 /*
1201 * The copy-paste buffer can only store into foreign real
1202 * addresses, so unprivileged processes can not see the
1203 * data or use it in any way unless they have foreign real
1204 * mappings. If the new process has the foreign real address
1205 * mappings, we must issue a cp_abort to clear any state and
1206 * prevent snooping, corruption or a covert channel.
1207 */
1208 if (current->thread.used_vas)
1209 asm volatile(PPC_CP_ABORT);
1210 }
1211 #endif /* CONFIG_PPC_BOOK3S_64 */
1212
1213 return last;
1214 }
1215
1216 #define NR_INSN_TO_PRINT 16
1217
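/*
 * Dump the kernel instructions surrounding regs->nip, marking the
 * instruction at NIP with angle brackets.
 */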
1218 static void show_instructions(struct pt_regs *regs)
1219 {
1220 int i;
1221 unsigned long nip = regs->nip;
1222 unsigned long pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));
1223
1224 printk("Instruction dump:");
1225
1226 /*
1227 * If we were executing with the MMU off for instructions, adjust pc
1228 * rather than printing XXXXXXXX.
1229 */
1230 if (!IS_ENABLED(CONFIG_BOOKE) && !(regs->msr & MSR_IR)) {
1231 pc = (unsigned long)phys_to_virt(pc);
1232 nip = (unsigned long)phys_to_virt(regs->nip);
1233 }
1234
1235 for (i = 0; i < NR_INSN_TO_PRINT; i++) {
1236 int instr;
1237
1238 if (!(i % 8))
1239 pr_cont("\n");
1240
1241 if (!__kernel_text_address(pc) ||
1242 probe_kernel_address((const void *)pc, instr)) {
1243 pr_cont("XXXXXXXX ");
1244 } else {
1245 if (nip == pc)
1246 pr_cont("<%08x> ", instr);
1247 else
1248 pr_cont("%08x ", instr);
1249 }
1250
1251 pc += sizeof(int);
1252 }
1253
1254 pr_cont("\n");
1255 }
1256
1257 void show_user_instructions(struct pt_regs *regs)
1258 {
1259 unsigned long pc;
1260 int n = NR_INSN_TO_PRINT;
1261 struct seq_buf s;
1262 char buf[96]; /* enough for 8 times 9 + 2 chars */
1263
1264 pc = regs->nip - (NR_INSN_TO_PRINT * 3 / 4 * sizeof(int));
1265
1266 /*
1267 * Make sure the NIP points at userspace, not kernel text/data or
1268 * elsewhere.
1269 */
1270 if (!__access_ok(pc, NR_INSN_TO_PRINT * sizeof(int), USER_DS)) {
1271 pr_info("%s[%d]: Bad NIP, not dumping instructions.\n",
1272 current->comm, current->pid);
1273 return;
1274 }
1275
1276 seq_buf_init(&s, buf, sizeof(buf));
1277
1278 while (n) {
1279 int i;
1280
1281 seq_buf_clear(&s);
1282
1283 for (i = 0; i < 8 && n; i++, n--, pc += sizeof(int)) {
1284 int instr;
1285
1286 if (probe_kernel_address((const void *)pc, instr)) {
1287 seq_buf_printf(&s, "XXXXXXXX ");
1288 continue;
1289 }
1290 seq_buf_printf(&s, regs->nip == pc ? "<%08x> " : "%08x ", instr);
1291 }
1292
1293 if (!seq_buf_has_overflowed(&s))
1294 pr_info("%s[%d]: code: %s\n", current->comm,
1295 current->pid, s.buffer);
1296 }
1297 }
1298
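/* Table mapping MSR bits to the short names printed by show_regs(). */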
1299 struct regbit {
1300 unsigned long bit;
1301 const char *name;
1302 };
1303
1304 static struct regbit msr_bits[] = {
1305 #if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
1306 {MSR_SF, "SF"},
1307 {MSR_HV, "HV"},
1308 #endif
1309 {MSR_VEC, "VEC"},
1310 {MSR_VSX, "VSX"},
1311 #ifdef CONFIG_BOOKE
1312 {MSR_CE, "CE"},
1313 #endif
1314 {MSR_EE, "EE"},
1315 {MSR_PR, "PR"},
1316 {MSR_FP, "FP"},
1317 {MSR_ME, "ME"},
1318 #ifdef CONFIG_BOOKE
1319 {MSR_DE, "DE"},
1320 #else
1321 {MSR_SE, "SE"},
1322 {MSR_BE, "BE"},
1323 #endif
1324 {MSR_IR, "IR"},
1325 {MSR_DR, "DR"},
1326 {MSR_PMM, "PMM"},
1327 #ifndef CONFIG_BOOKE
1328 {MSR_RI, "RI"},
1329 {MSR_LE, "LE"},
1330 #endif
1331 {0, NULL}
1332 };
1333
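/* Print the names of the bits in @bits that are set in @val, separated by @sep. */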
1334 static void print_bits(unsigned long val, struct regbit *bits, const char *sep)
1335 {
1336 const char *s = "";
1337
1338 for (; bits->bit; ++bits)
1339 if (val & bits->bit) {
1340 pr_cont("%s%s", s, bits->name);
1341 s = sep;
1342 }
1343 }
1344
1345 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1346 static struct regbit msr_tm_bits[] = {
1347 {MSR_TS_T, "T"},
1348 {MSR_TS_S, "S"},
1349 {MSR_TM, "E"},
1350 {0, NULL}
1351 };
1352
1353 static void print_tm_bits(unsigned long val)
1354 {
1355 /*
1356 * This only prints something if at least one of the TM bits is set.
1357 * Inside the TM[], the output means:
1358 * E: Enabled (bit 32)
1359 * S: Suspended (bit 33)
1360 * T: Transactional (bit 34)
1361 */
1362 if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) {
1363 pr_cont(",TM[");
1364 print_bits(val, msr_tm_bits, "");
1365 pr_cont("]");
1366 }
1367 }
1368 #else
1369 static void print_tm_bits(unsigned long val) {}
1370 #endif
1371
1372 static void print_msr_bits(unsigned long val)
1373 {
1374 pr_cont("<");
1375 print_bits(val, msr_bits, ",");
1376 print_tm_bits(val);
1377 pr_cont(">");
1378 }
1379
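/*
 * Register dump formatting for show_regs(): 64-bit kernels print four
 * 16-digit registers per line, 32-bit kernels print eight 8-digit
 * registers per line.
 */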
1380 #ifdef CONFIG_PPC64
1381 #define REG "%016lx"
1382 #define REGS_PER_LINE 4
1383 #define LAST_VOLATILE 13
1384 #else
1385 #define REG "%08lx"
1386 #define REGS_PER_LINE 8
1387 #define LAST_VOLATILE 12
1388 #endif
1389
1390 void show_regs(struct pt_regs *regs)
1391 {
1392 int i, trap;
1393
1394 show_regs_print_info(KERN_DEFAULT);
1395
1396 printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
1397 regs->nip, regs->link, regs->ctr);
1398 printk("REGS: %px TRAP: %04lx %s (%s)\n",
1399 regs, regs->trap, print_tainted(), init_utsname()->release);
1400 printk("MSR: "REG" ", regs->msr);
1401 print_msr_bits(regs->msr);
1402 pr_cont(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
1403 trap = TRAP(regs);
1404 if ((TRAP(regs) != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
1405 pr_cont("CFAR: "REG" ", regs->orig_gpr3);
1406 if (trap == 0x200 || trap == 0x300 || trap == 0x600)
1407 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
1408 pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
1409 #else
1410 pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
1411 #endif
1412 #ifdef CONFIG_PPC64
1413 pr_cont("IRQMASK: %lx ", regs->softe);
1414 #endif
1415 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1416 if (MSR_TM_ACTIVE(regs->msr))
1417 pr_cont("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
1418 #endif
1419
1420 for (i = 0; i < 32; i++) {
1421 if ((i % REGS_PER_LINE) == 0)
1422 pr_cont("\nGPR%02d: ", i);
1423 pr_cont(REG " ", regs->gpr[i]);
1424 if (i == LAST_VOLATILE && !FULL_REGS(regs))
1425 break;
1426 }
1427 pr_cont("\n");
1428 #ifdef CONFIG_KALLSYMS
1429 /*
1430 * Look up NIP late so we have the best chance of getting the
1431 * above info out without failing
1432 */
1433 printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
1434 printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
1435 #endif
1436 show_stack(current, (unsigned long *) regs->gpr[1]);
1437 if (!user_mode(regs))
1438 show_instructions(regs);
1439 }
1440
1441 void flush_thread(void)
1442 {
1443 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1444 flush_ptrace_hw_breakpoint(current);
1445 #else /* CONFIG_HAVE_HW_BREAKPOINT */
1446 set_debug_reg_defaults(&current->thread);
1447 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
1448 }
1449
1450 #ifdef CONFIG_PPC_BOOK3S_64
1451 void arch_setup_new_exec(void)
1452 {
1453 if (radix_enabled())
1454 return;
1455 hash__setup_new_exec();
1456 }
1457 #endif
1458
1459 int set_thread_uses_vas(void)
1460 {
1461 #ifdef CONFIG_PPC_BOOK3S_64
1462 if (!cpu_has_feature(CPU_FTR_ARCH_300))
1463 return -EINVAL;
1464
1465 current->thread.used_vas = 1;
1466
1467 /*
1468 * Even a process that has no foreign real address mapping can use
1469 * an unpaired COPY instruction (to no real effect). Issue CP_ABORT
1470 * to clear any pending COPY and prevent a covert channel.
1471 *
1472 * __switch_to() will issue CP_ABORT on future context switches.
1473 */
1474 asm volatile(PPC_CP_ABORT);
1475
1476 #endif /* CONFIG_PPC_BOOK3S_64 */
1477 return 0;
1478 }
1479
1480 #ifdef CONFIG_PPC64
1481 /**
1482 * Assign a TIDR (thread ID) for task @t and set it in the thread
1483 * structure. For now, we only support setting TIDR for 'current' task.
1484 *
1485 * Since the TID value is a truncated form of the task's PID, it is possible
1486 * (but unlikely) for 2 threads to have the same TID. In the unlikely event
1487 * that 2 threads share the same TID and are waiting, one of the following
1488 * cases will happen:
1489 *
1490 * 1. The correct thread is running, the wrong thread is not
1491 * In this situation, the correct thread is woken and proceeds to pass its
1492 * condition check.
1493 *
1494 * 2. Neither threads are running
1495 * In this situation, neither thread will be woken. When scheduled, the waiting
1496 * threads will execute either a wait, which will return immediately, followed
1497 * by a condition check, which will pass for the correct thread and fail
1498 * for the wrong thread, or they will execute the condition check immediately.
1499 *
1500 * 3. The wrong thread is running, the correct thread is not
1501 * The wrong thread will be woken, but will fail its condition check and
1502 * re-execute wait. The correct thread, when scheduled, will execute either
1503 * its condition check (which will pass), or wait, which returns immediately
1504 * when called the first time after the thread is scheduled, followed by its
1505 * condition check (which will pass).
1506 *
1507 * 4. Both threads are running
1508 * Both threads will be woken. The wrong thread will fail its condition check
1509 * and execute another wait, while the correct thread will pass its condition
1510 * check.
1511 *
1512 * @t: the task to set the thread ID for
1513 */
1514 int set_thread_tidr(struct task_struct *t)
1515 {
1516 if (!cpu_has_feature(CPU_FTR_P9_TIDR))
1517 return -EINVAL;
1518
1519 if (t != current)
1520 return -EINVAL;
1521
1522 if (t->thread.tidr)
1523 return 0;
1524
1525 t->thread.tidr = (u16)task_pid_nr(t);
1526 mtspr(SPRN_TIDR, t->thread.tidr);
1527
1528 return 0;
1529 }
1530 EXPORT_SYMBOL_GPL(set_thread_tidr);
1531
1532 #endif /* CONFIG_PPC64 */
1533
1534 void
1535 release_thread(struct task_struct *t)
1536 {
1537 }
1538
1539 /*
1540 * this gets called so that we can store coprocessor state into memory and
1541 * copy the current task into the new thread.
1542 */
1543 int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
1544 {
1545 flush_all_to_thread(src);
1546 /*
1547 * Flush TM state out so we can copy it. __switch_to_tm() does this
1548 * flush but it removes the checkpointed state from the current CPU and
1549 * transitions the CPU out of TM mode. Hence we need to call
1550 * tm_recheckpoint_new_task() (on the same task) to restore the
1551 * checkpointed state back and the TM mode.
1552 *
1553 * Can't pass dst because it isn't ready. Doesn't matter, passing
1554 * dst is only important for __switch_to()
1555 */
1556 __switch_to_tm(src, src);
1557
1558 *dst = *src;
1559
1560 clear_task_ebb(dst);
1561
1562 return 0;
1563 }
1564
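/*
 * Precompute the SLB VSID for the new task's kernel stack and store it
 * in thread.ksp_vsid for use at context switch. Only needed for the
 * hash MMU; radix returns early.
 */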
1565 static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
1566 {
1567 #ifdef CONFIG_PPC_BOOK3S_64
1568 unsigned long sp_vsid;
1569 unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
1570
1571 if (radix_enabled())
1572 return;
1573
1574 if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
1575 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
1576 << SLB_VSID_SHIFT_1T;
1577 else
1578 sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
1579 << SLB_VSID_SHIFT;
1580 sp_vsid |= SLB_VSID_KERNEL | llp;
1581 p->thread.ksp_vsid = sp_vsid;
1582 #endif
1583 }
1584
1585 /*
1586 * Copy a thread..
1587 */
1588
1589 /*
1590 * Copy architecture-specific thread state
1591 */
1592 int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
1593 unsigned long kthread_arg, struct task_struct *p,
1594 unsigned long tls)
1595 {
1596 struct pt_regs *childregs, *kregs;
1597 extern void ret_from_fork(void);
1598 extern void ret_from_kernel_thread(void);
1599 void (*f)(void);
1600 unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
1601 struct thread_info *ti = task_thread_info(p);
1602
1603 klp_init_thread_info(p);
1604
1605 /* Copy registers */
1606 sp -= sizeof(struct pt_regs);
1607 childregs = (struct pt_regs *) sp;
1608 if (unlikely(p->flags & PF_KTHREAD)) {
1609 /* kernel thread */
1610 memset(childregs, 0, sizeof(struct pt_regs));
1611 childregs->gpr[1] = sp + sizeof(struct pt_regs);
1612 /* function */
1613 if (usp)
1614 childregs->gpr[14] = ppc_function_entry((void *)usp);
1615 #ifdef CONFIG_PPC64
1616 clear_tsk_thread_flag(p, TIF_32BIT);
1617 childregs->softe = IRQS_ENABLED;
1618 #endif
1619 childregs->gpr[15] = kthread_arg;
1620 p->thread.regs = NULL; /* no user register state */
1621 ti->flags |= _TIF_RESTOREALL;
1622 f = ret_from_kernel_thread;
1623 } else {
1624 /* user thread */
1625 struct pt_regs *regs = current_pt_regs();
1626 CHECK_FULL_REGS(regs);
1627 *childregs = *regs;
1628 if (usp)
1629 childregs->gpr[1] = usp;
1630 p->thread.regs = childregs;
1631 childregs->gpr[3] = 0; /* Result from fork() */
1632 if (clone_flags & CLONE_SETTLS) {
1633 #ifdef CONFIG_PPC64
1634 if (!is_32bit_task())
1635 childregs->gpr[13] = tls;
1636 else
1637 #endif
1638 childregs->gpr[2] = tls;
1639 }
1640
1641 f = ret_from_fork;
1642 }
1643 childregs->msr &= ~(MSR_FP|MSR_VEC|MSR_VSX);
1644 sp -= STACK_FRAME_OVERHEAD;
1645
1646 /*
1647 * The way this works is that at some point in the future
1648 * some task will call _switch to switch to the new task.
1649 * That will pop off the stack frame created below and start
1650 * the new task running at ret_from_fork. The new task will
1651 * do some house keeping and then return from the fork or clone
1652 * system call, using the stack frame created above.
1653 */
1654 ((unsigned long *)sp)[0] = 0;
1655 sp -= sizeof(struct pt_regs);
1656 kregs = (struct pt_regs *) sp;
1657 sp -= STACK_FRAME_OVERHEAD;
1658 p->thread.ksp = sp;
1659 #ifdef CONFIG_PPC32
1660 p->thread.ksp_limit = (unsigned long)end_of_stack(p);
1661 #endif
1662 #ifdef CONFIG_HAVE_HW_BREAKPOINT
1663 p->thread.ptrace_bps[0] = NULL;
1664 #endif
1665
1666 p->thread.fp_save_area = NULL;
1667 #ifdef CONFIG_ALTIVEC
1668 p->thread.vr_save_area = NULL;
1669 #endif
1670
1671 setup_ksp_vsid(p, sp);
1672
1673 #ifdef CONFIG_PPC64
1674 if (cpu_has_feature(CPU_FTR_DSCR)) {
1675 p->thread.dscr_inherit = current->thread.dscr_inherit;
1676 p->thread.dscr = mfspr(SPRN_DSCR);
1677 }
1678 if (cpu_has_feature(CPU_FTR_HAS_PPR))
1679 childregs->ppr = DEFAULT_PPR;
1680
1681 p->thread.tidr = 0;
1682 #endif
1683 kregs->nip = ppc_function_entry(f);
1684 return 0;
1685 }
1686
1687 void preload_new_slb_context(unsigned long start, unsigned long sp);
1688
1689 /*
1690 * Set up a thread for executing a new program
1691 */
1692 void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
1693 {
1694 #ifdef CONFIG_PPC64
1695 unsigned long load_addr = regs->gpr[2]; /* saved by ELF_PLAT_INIT */
1696
1697 #ifdef CONFIG_PPC_BOOK3S_64
1698 if (!radix_enabled())
1699 preload_new_slb_context(start, sp);
1700 #endif
1701 #endif
1702
1703 /*
1704 * If we exec out of a kernel thread then thread.regs will not be
1705 * set. Do it now.
1706 */
1707 if (!current->thread.regs) {
1708 struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
1709 current->thread.regs = regs - 1;
1710 }
1711
1712 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1713 /*
1714 * Clear any transactional state, we're exec()ing. The cause is
1715 * not important as there will never be a recheckpoint so it's not
1716 * user visible.
1717 */
1718 if (MSR_TM_SUSPENDED(mfmsr()))
1719 tm_reclaim_current(0);
1720 #endif
1721
1722 memset(&regs->gpr[1], 0, sizeof(regs->gpr) - sizeof(regs->gpr[0]));
1723 regs->ctr = 0;
1724 regs->link = 0;
1725 regs->xer = 0;
1726 regs->ccr = 0;
1727 regs->gpr[1] = sp;
1728
1729 /*
1730 * We have just cleared all the nonvolatile GPRs, so make
1731 * FULL_REGS(regs) return true. This is necessary to allow
1732 * ptrace to examine the thread immediately after exec.
1733 */
1734 regs->trap &= ~1UL;
1735
1736 #ifdef CONFIG_PPC32
1737 regs->mq = 0;
1738 regs->nip = start;
1739 regs->msr = MSR_USER;
1740 #else
1741 if (!is_32bit_task()) {
1742 unsigned long entry;
1743
1744 if (is_elf2_task()) {
1745 /* Look ma, no function descriptors! */
1746 entry = start;
1747
1748 /*
1749 * Ulrich says:
1750 * The latest iteration of the ABI requires that when
1751 * calling a function (at its global entry point),
1752 * the caller must ensure r12 holds the entry point
1753 * address (so that the function can quickly
1754 * establish addressability).
1755 */
1756 regs->gpr[12] = start;
1757 /* Make sure that's restored on entry to userspace. */
1758 set_thread_flag(TIF_RESTOREALL);
1759 } else {
1760 unsigned long toc;
1761
1762 /* start is a relocated pointer to the function
1763 * descriptor for the elf _start routine. The first
1764 * entry in the function descriptor is the entry
1765 * address of _start and the second entry is the TOC
1766 * value we need to use.
1767 */
1768 __get_user(entry, (unsigned long __user *)start);
1769 __get_user(toc, (unsigned long __user *)start+1);
1770
1771 /* Check whether the e_entry function descriptor entries
1772 * need to be relocated before we can use them.
1773 */
1774 if (load_addr != 0) {
1775 entry += load_addr;
1776 toc += load_addr;
1777 }
1778 regs->gpr[2] = toc;
1779 }
1780 regs->nip = entry;
1781 regs->msr = MSR_USER64;
1782 } else {
1783 regs->nip = start;
1784 regs->gpr[2] = 0;
1785 regs->msr = MSR_USER32;
1786 }
1787 #endif
1788 #ifdef CONFIG_VSX
1789 current->thread.used_vsr = 0;
1790 #endif
1791 current->thread.load_slb = 0;
1792 current->thread.load_fp = 0;
1793 memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
1794 current->thread.fp_save_area = NULL;
1795 #ifdef CONFIG_ALTIVEC
1796 memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
1797 current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
1798 current->thread.vr_save_area = NULL;
1799 current->thread.vrsave = 0;
1800 current->thread.used_vr = 0;
1801 current->thread.load_vec = 0;
1802 #endif /* CONFIG_ALTIVEC */
1803 #ifdef CONFIG_SPE
1804 memset(current->thread.evr, 0, sizeof(current->thread.evr));
1805 current->thread.acc = 0;
1806 current->thread.spefscr = 0;
1807 current->thread.used_spe = 0;
1808 #endif /* CONFIG_SPE */
1809 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1810 current->thread.tm_tfhar = 0;
1811 current->thread.tm_texasr = 0;
1812 current->thread.tm_tfiar = 0;
1813 current->thread.load_tm = 0;
1814 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
1815
1816 thread_pkey_regs_init(&current->thread);
1817 }
1818 EXPORT_SYMBOL(start_thread);
1819
1820 #define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
1821 | PR_FP_EXC_RES | PR_FP_EXC_INV)
1822
1823 int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
1824 {
1825 struct pt_regs *regs = tsk->thread.regs;
1826
1827 /* This is a bit hairy. If we are an SPE enabled processor
1828 * (have embedded fp) we store the IEEE exception enable flags in
1829 * fpexc_mode. fpexc_mode is also used for setting FP exception
1830 * mode (async, precise, disabled) for 'Classic' FP. */
	if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			/*
			 * When the sticky exception bits are set
			 * directly by userspace, it must call prctl
			 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
			 * in the existing prctl settings) or
			 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
			 * the bits being set). <fenv.h> functions
			 * saving and restoring the whole
			 * floating-point environment need to do so
			 * anyway to restore the prctl settings from
			 * the saved environment.
			 */
			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
			tsk->thread.fpexc_mode = val &
				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
			return 0;
		} else {
			return -EINVAL;
		}
#else
		return -EINVAL;
#endif
	}

	/* On a CONFIG_SPE kernel this does not hurt us. The bits that
	 * __pack_fe01 uses do not overlap with the bits used for
	 * PR_FP_EXC_SW_ENABLE. Additionally, the MSR[FE0,FE1] bits
	 * on CONFIG_SPE implementations are reserved, so writing to
	 * them does not change anything. */
	if (val > PR_FP_EXC_PRECISE)
		return -EINVAL;
	tsk->thread.fpexc_mode = __pack_fe01(val);
	if (regs != NULL && (regs->msr & MSR_FP) != 0)
		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
			| tsk->thread.fpexc_mode;
	return 0;
}

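/*
 * Illustrative sketch (userspace, not part of this file) of the prctl
 * interface implemented by set_fpexc_mode()/get_fpexc_mode(); constants
 * come from <linux/prctl.h>, error handling omitted:
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	unsigned int mode;
 *
 *	prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);    classic FP: precise traps
 *	prctl(PR_GET_FPEXC, &mode);                read back the current mode
 *
 * PR_FP_EXC_SW_ENABLE (together with the PR_FP_EXC_* sticky bits) is only
 * accepted on SPE-capable CPUs, as the feature checks above show; other
 * values greater than PR_FP_EXC_PRECISE are rejected with -EINVAL.
 */
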
int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
	unsigned int val;

	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			/*
			 * When the sticky exception bits are set
			 * directly by userspace, it must call prctl
			 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
			 * in the existing prctl settings) or
			 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
			 * the bits being set). <fenv.h> functions
			 * saving and restoring the whole
			 * floating-point environment need to do so
			 * anyway to restore the prctl settings from
			 * the saved environment.
			 */
			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
			val = tsk->thread.fpexc_mode;
		} else
			return -EINVAL;
#else
		return -EINVAL;
#endif
	else
		val = __unpack_fe01(tsk->thread.fpexc_mode);
	return put_user(val, (unsigned int __user *) adr);
}

int set_endian(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (val == PR_ENDIAN_BIG)
		regs->msr &= ~MSR_LE;
	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
		regs->msr |= MSR_LE;
	else
		return -EINVAL;

	return 0;
}

int get_endian(struct task_struct *tsk, unsigned long adr)
{
	struct pt_regs *regs = tsk->thread.regs;
	unsigned int val;

	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
	    !cpu_has_feature(CPU_FTR_REAL_LE))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (regs->msr & MSR_LE) {
		if (cpu_has_feature(CPU_FTR_REAL_LE))
			val = PR_ENDIAN_LITTLE;
		else
			val = PR_ENDIAN_PPC_LITTLE;
	} else
		val = PR_ENDIAN_BIG;

	return put_user(val, (unsigned int __user *)adr);
}

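/*
 * Illustrative sketch (userspace, not part of this file): querying and
 * changing the register endianness of the calling thread via prctl(),
 * assuming the PR_GET_ENDIAN/PR_SET_ENDIAN constants from <linux/prctl.h>:
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	unsigned int endian;
 *
 *	if (prctl(PR_GET_ENDIAN, &endian) == 0 && endian == PR_ENDIAN_BIG)
 *		prctl(PR_SET_ENDIAN, PR_ENDIAN_LITTLE);
 *
 * PR_SET_ENDIAN fails with -EINVAL on CPUs lacking the corresponding
 * CPU_FTR_REAL_LE/CPU_FTR_PPC_LE feature, as set_endian() above shows.
 */
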
int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	tsk->thread.align_ctl = val;
	return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}

static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
				  unsigned long nbytes)
{
	unsigned long stack_page;
	unsigned long cpu = task_cpu(p);

	stack_page = (unsigned long)hardirq_ctx[cpu];
	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	stack_page = (unsigned long)softirq_ctx[cpu];
	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	return 0;
}

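/*
 * validate_sp() - check that a candidate stack pointer is plausible.
 *
 * Returns 1 if @sp points into @p's kernel stack (or one of this CPU's
 * hard/soft IRQ stacks) with at least @nbytes available between @sp and
 * the top of that stack, and 0 otherwise.  The stack walkers below call
 * this before dereferencing a frame.
 */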
int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes)
{
	unsigned long stack_page = (unsigned long)task_stack_page(p);

	if (sp < THREAD_SIZE)
		return 0;

	if (sp >= stack_page && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	return valid_irq_stack(sp, p, nbytes);
}

EXPORT_SYMBOL(validate_sp);

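/*
 * __get_wchan() - find where a blocked task is sleeping.
 *
 * Walks @p's kernel stack via the saved back chain, skipping the first
 * frame (it belongs to the scheduler itself), and returns the first saved
 * LR that is not inside the scheduler (!in_sched_functions()).  Gives up
 * after a handful of frames, or if the task starts running.
 */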
static unsigned long __get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.ksp;
	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
		return 0;

	do {
		sp = READ_ONCE_NOCHECK(*(unsigned long *)sp);
		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) ||
		    p->state == TASK_RUNNING)
			return 0;
		if (count > 0) {
			ip = READ_ONCE_NOCHECK(((unsigned long *)sp)[STACK_FRAME_LR_SAVE]);
			if (!in_sched_functions(ip))
				return ip;
		}
	} while (count++ < 16);
	return 0;
}

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ret;

	if (!try_get_task_stack(p))
		return 0;

	ret = __get_wchan(p);

	put_task_stack(p);

	return ret;
}

static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

void show_stack(struct task_struct *tsk, unsigned long *stack)
{
	unsigned long sp, ip, lr, newsp;
	int count = 0;
	int firstframe = 1;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long ret_addr;
	int ftrace_idx = 0;
#endif

	if (tsk == NULL)
		tsk = current;

	if (!try_get_task_stack(tsk))
		return;

	sp = (unsigned long) stack;
	if (sp == 0) {
		if (tsk == current)
			sp = current_stack_pointer();
		else
			sp = tsk->thread.ksp;
	}

	lr = 0;
	printk("Call Trace:\n");
	do {
		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			break;

		stack = (unsigned long *) sp;
		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!firstframe || ip != lr) {
			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
			ret_addr = ftrace_graph_ret_addr(current,
						&ftrace_idx, ip, stack);
			if (ret_addr != ip)
				pr_cont(" (%pS)", (void *)ret_addr);
#endif
			if (firstframe)
				pr_cont(" (unreliable)");
			pr_cont("\n");
		}
		firstframe = 0;

		/*
		 * See if this is an exception frame.
		 * We look for the "regshere" marker in the current frame.
		 */
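		/*
		 * Frame layout relied on here (word offsets are the generic
		 * STACK_FRAME_* constants, which differ between 32-bit and
		 * 64-bit kernels):
		 *
		 *	sp[0]                    back chain (caller's sp)
		 *	sp[STACK_FRAME_LR_SAVE]  saved LR
		 *	sp[STACK_FRAME_MARKER]   "regshere" marker, present
		 *	                         only when a struct pt_regs
		 *	                         sits at sp + STACK_FRAME_OVERHEAD
		 */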
		if (validate_sp(sp, tsk, STACK_FRAME_WITH_PT_REGS)
		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			struct pt_regs *regs = (struct pt_regs *)
				(sp + STACK_FRAME_OVERHEAD);
			lr = regs->link;
			printk("--- interrupt: %lx at %pS\n LR = %pS\n",
			       regs->trap, (void *)regs->nip, (void *)lr);
			firstframe = 1;
		}

		sp = newsp;
	} while (count++ < kstack_depth_to_print);

	put_task_stack(tsk);
}

#ifdef CONFIG_PPC64
/* Called with hard IRQs off */
void notrace __ppc64_runlatch_on(void)
{
	struct thread_info *ti = current_thread_info();

	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
		/*
		 * Least significant bit (RUN) is the only writable bit of
		 * the CTRL register, so we can avoid mfspr. 2.06 is not the
		 * earliest ISA where this is the case, but it's convenient.
		 */
		mtspr(SPRN_CTRLT, CTRL_RUNLATCH);
	} else {
		unsigned long ctrl;

		/*
		 * Some architectures (e.g., Cell) have writable fields other
		 * than RUN, so do the read-modify-write.
		 */
		ctrl = mfspr(SPRN_CTRLF);
		ctrl |= CTRL_RUNLATCH;
		mtspr(SPRN_CTRLT, ctrl);
	}

	ti->local_flags |= _TLF_RUNLATCH;
}

/* Called with hard IRQs off */
void notrace __ppc64_runlatch_off(void)
{
	struct thread_info *ti = current_thread_info();

	ti->local_flags &= ~_TLF_RUNLATCH;

	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
		mtspr(SPRN_CTRLT, 0);
	} else {
		unsigned long ctrl;

		ctrl = mfspr(SPRN_CTRLF);
		ctrl &= ~CTRL_RUNLATCH;
		mtspr(SPRN_CTRLT, ctrl);
	}
}
#endif /* CONFIG_PPC64 */

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}

static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = 0;

	/* 8MB for 32bit, 1GB for 64bit */
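	/*
	 * The arithmetic: rnd is a page count, so e.g. with 64K pages
	 * (PAGE_SHIFT = 16) the 32-bit case draws from [0, 1 << (23 - 16))
	 * pages, which after the << PAGE_SHIFT below spans up to 8MB of
	 * byte offset; likewise 1 << (30 - PAGE_SHIFT) pages gives up to
	 * 1GB for 64-bit tasks.
	 */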
	if (is_32bit_task())
		rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
	else
		rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));

	return rnd << PAGE_SHIFT;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret;

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * If we are using 1TB segments and we are allowed to randomise
	 * the heap, we can put it above 1TB so it is backed by a 1TB
	 * segment. Otherwise the heap will be in the bottom 1TB
	 * which always uses 256MB segments and this may result in a
	 * performance penalty. We don't need to worry about radix. For
	 * radix, mmu_highuser_ssize remains unchanged from 256MB.
	 */
	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
#endif

	ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < mm->brk)
		return mm->brk;

	return ret;
}
