/*
 *  Derived from "arch/i386/kernel/process.c"
 *    Copyright (C) 1995  Linus Torvalds
 *
 *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 *  Paul Mackerras (paulus@cs.anu.edu.au)
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/runlatch.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#endif
#include <linux/kprobes.h>
#include <linux/kdebug.h>

/* Transactional Memory debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while (0)
#endif

extern unsigned long _get_SP(void);

#ifndef CONFIG_SMP
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_vsx = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif

/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct.  That process could take over the
		 * FPU, and then when we get scheduled again we would store
		 * bogus values for the remaining FP registers.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
			/*
			 * This should only ever be called for current or
			 * for a stopped child process.  Since we save away
			 * the FP register state on context switch on SMP,
			 * there is something wrong if a stopped child appears
			 * to still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);
#endif
			giveup_fpu(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);

void enable_kernel_fp(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
		giveup_fpu(current);
	else
		giveup_fpu(NULL);	/* just enables FP for kernel */
#else
	giveup_fpu(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);

#ifdef CONFIG_ALTIVEC
void enable_kernel_altivec(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
		giveup_altivec(current);
	else
		giveup_altivec_notask();
#else
	giveup_altivec(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_altivec(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
#if 0
/* not currently used, but some crazy RAID module might want to later */
void enable_kernel_vsx(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
		giveup_vsx(current);
	else
		giveup_vsx(NULL);	/* just enable vsx for kernel - force */
#else
	giveup_vsx(last_task_used_vsx);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_vsx);
#endif

void giveup_vsx(struct task_struct *tsk)
{
	giveup_fpu(tsk);
	giveup_altivec(tsk);
	__giveup_vsx(tsk);
}

void flush_vsx_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VSX) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_vsx(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE

void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
		giveup_spe(current);
	else
		giveup_spe(NULL);	/* just enable SPE for kernel - force */
#else
	giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);

void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
			giveup_spe(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_SPE */

#ifndef CONFIG_SMP
/*
 * If we are doing lazy switching of CPU state (FP, altivec or SPE),
 * and the current task has some state, discard it.
 */
void discard_lazy_cpu_state(void)
{
	preempt_disable();
	if (last_task_used_math == current)
		last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (last_task_used_vsx == current)
		last_task_used_vsx = NULL;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	if (last_task_used_spe == current)
		last_task_used_spe = NULL;
#endif
	preempt_enable();
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code, int signal_code, int breakpt)
{
	siginfo_t info;

	current->thread.trap_nr = signal_code;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = breakpt;	/* breakpoint or watchpoint id */
	info.si_code = signal_code;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
void do_break(struct pt_regs *regs, unsigned long address,
		    unsigned long error_code)
{
	siginfo_t info;

	current->thread.trap_nr = TRAP_HWBKPT;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	if (debugger_break_match(regs))
		return;

	/* Clear the breakpoint */
	hw_breakpoint_disable();

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */

static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->iac1 = thread->iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	thread->iac3 = thread->iac4 = 0;
#endif
	thread->dac1 = thread->dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	thread->dvc1 = thread->dvc2 = 0;
#endif
	thread->dbcr0 = 0;
#ifdef CONFIG_BOOKE
	/*
	 * Force User/Supervisor bits to b11 (user-only MSR[PR]=1)
	 */
	thread->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |	\
			DBCR1_IAC3US | DBCR1_IAC4US;
	/*
	 * Force Data Address Compare User/Supervisor bits to be User-only
	 * (0b11 MSR[PR]=1) and set all other bits in DBCR2 register to be 0.
	 */
	thread->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
	thread->dbcr1 = 0;
#endif
}

static void prime_debug_regs(struct thread_struct *thread)
{
	/*
	 * We could have inherited MSR_DE from userspace, since
	 * it doesn't get cleared on exception entry.  Make sure
	 * MSR_DE is clear before we enable any debug events.
	 */
	mtmsr(mfmsr() & ~MSR_DE);

	mtspr(SPRN_IAC1, thread->iac1);
	mtspr(SPRN_IAC2, thread->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	mtspr(SPRN_IAC3, thread->iac3);
	mtspr(SPRN_IAC4, thread->iac4);
#endif
	mtspr(SPRN_DAC1, thread->dac1);
	mtspr(SPRN_DAC2, thread->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	mtspr(SPRN_DVC1, thread->dvc1);
	mtspr(SPRN_DVC2, thread->dvc2);
#endif
	mtspr(SPRN_DBCR0, thread->dbcr0);
	mtspr(SPRN_DBCR1, thread->dbcr1);
#ifdef CONFIG_BOOKE
	mtspr(SPRN_DBCR2, thread->dbcr2);
#endif
}
/*
 * If either the old or the new thread is making use of the
 * debug registers, set the debug registers from the values
 * stored in the new thread.
 */
static void switch_booke_debug_regs(struct thread_struct *new_thread)
{
	if ((current->thread.dbcr0 & DBCR0_IDM)
		|| (new_thread->dbcr0 & DBCR0_IDM))
			prime_debug_regs(new_thread);
}
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->hw_brk.address = 0;
	thread->hw_brk.type = 0;
	set_breakpoint(&thread->hw_brk);
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DAC1, dabr);
#ifdef CONFIG_PPC_47x
	isync();
#endif
	return 0;
}
#elif defined(CONFIG_PPC_BOOK3S)
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DABR, dabr);
	if (cpu_has_feature(CPU_FTR_DABRX))
		mtspr(SPRN_DABRX, dabrx);
	return 0;
}
#else
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	return -EINVAL;
}
#endif

static inline int set_dabr(struct arch_hw_breakpoint *brk)
{
	unsigned long dabr, dabrx;

	dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
	dabrx = ((brk->type >> 3) & 0x7);
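	/*
	 * Sketch of the brk->type layout assumed here: the low bits
	 * (HW_BRK_TYPE_DABR, i.e. read/write/translate) go straight into
	 * the DABR alongside the address, while the next three bits
	 * (the user/kernel/hyp privilege flags) are shifted down to form
	 * the DABRX value.
	 */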

	if (ppc_md.set_dabr)
		return ppc_md.set_dabr(dabr, dabrx);

	return __set_dabr(dabr, dabrx);
}

static inline int set_dawr(struct arch_hw_breakpoint *brk)
{
	unsigned long dawr, dawrx, mrd;

	dawr = brk->address;

	dawrx  = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE)) \
		                   << (63 - 58); /* read/write bits */
	dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2) \
		                   << (63 - 59); /* translate */
	dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL)) \
		                   >> 3; /* privilege bits */
	/* The DAWR length is stored in field MRD, bits 48:53.  It encodes
	   the match range in doublewords (64 bits), biased by -1, e.g.
	   0b000000 = 1 DW and 0b111111 = 64 DW.
	   brk->len is in bytes.
	   This aligns up to double word size, shifts and does the bias.
	*/
	mrd = ((brk->len + 7) >> 3) - 1;
	dawrx |= (mrd & 0x3f) << (63 - 53);
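	/*
	 * Worked example (illustrative): brk->len = 1..8 bytes gives
	 * mrd = ((len + 7) >> 3) - 1 = 0, i.e. a 1-doubleword range;
	 * brk->len = 9..16 gives mrd = 1, i.e. 2 doublewords.
	 */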

	if (ppc_md.set_dawr)
		return ppc_md.set_dawr(dawr, dawrx);
	mtspr(SPRN_DAWR, dawr);
	mtspr(SPRN_DAWRX, dawrx);
	return 0;
}

int set_breakpoint(struct arch_hw_breakpoint *brk)
{
	__get_cpu_var(current_brk) = *brk;

	if (cpu_has_feature(CPU_FTR_DAWR))
		return set_dawr(brk);

	return set_dabr(brk);
}

#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif

static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
			      struct arch_hw_breakpoint *b)
{
	if (a->address != b->address)
		return false;
	if (a->type != b->type)
		return false;
	if (a->len != b->len)
		return false;
	return true;
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline void tm_reclaim_task(struct task_struct *tsk)
{
	/* We have to work out if we're switching from/to a task that's in the
	 * middle of a transaction.
	 *
	 * In switching we need to maintain a 2nd register state as
	 * oldtask->thread.ckpt_regs.  We tm_reclaim(oldproc); this saves the
	 * checkpointed (tbegin) state in ckpt_regs and saves the transactional
	 * (current) FPRs into oldtask->thread.transact_fpr[].
	 *
	 * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
	 */
	struct thread_struct *thr = &tsk->thread;

	if (!thr->regs)
		return;

	if (!MSR_TM_ACTIVE(thr->regs->msr))
		goto out_and_saveregs;

	/* Stash the original thread MSR, as giveup_fpu et al will
	 * modify it.  We hold onto it to see whether the task used
	 * FP & vector regs.
	 */
	thr->tm_orig_msr = thr->regs->msr;

	TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
		 "ccr=%lx, msr=%lx, trap=%lx)\n",
		 tsk->pid, thr->regs->nip,
		 thr->regs->ccr, thr->regs->msr,
		 thr->regs->trap);

	tm_reclaim(thr, thr->regs->msr, TM_CAUSE_RESCHED);

	TM_DEBUG("--- tm_reclaim on pid %d complete\n",
		 tsk->pid);

out_and_saveregs:
	/* Always save the regs here, even if a transaction's not active.
	 * This context-switches a thread's TM info SPRs.  We do it here to
	 * be consistent with the restore path (in recheckpoint) which
	 * cannot happen later in _switch().
	 */
	tm_save_sprs(thr);
}

static inline void tm_recheckpoint_new_task(struct task_struct *new)
{
	unsigned long msr;

	if (!cpu_has_feature(CPU_FTR_TM))
		return;

	/* Recheckpoint the registers of the thread we're about to switch to.
	 *
	 * If the task was using FP, we non-lazily reload both the original and
	 * the speculative FP register states.  This is because the kernel
	 * doesn't see if/when a TM rollback occurs, so if we take an FP
	 * unavailable exception later, we are unable to determine which set
	 * of FP regs needs to be restored.
	 */
	if (!new->thread.regs)
		return;

	/* The TM SPRs are restored here, so that TEXASR.FS can be set
	 * before the trecheckpoint and no explosion occurs.
	 */
	tm_restore_sprs(&new->thread);

	if (!MSR_TM_ACTIVE(new->thread.regs->msr))
		return;
	msr = new->thread.tm_orig_msr;
	/* Recheckpoint to restore original checkpointed register state. */
	TM_DEBUG("*** tm_recheckpoint of pid %d "
		 "(new->msr 0x%lx, new->origmsr 0x%lx)\n",
		 new->pid, new->thread.regs->msr, msr);

	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&new->thread, msr);

	/* This loads the speculative FP/VEC state, if used */
	if (msr & MSR_FP) {
		do_load_up_transact_fpu(&new->thread);
		new->thread.regs->msr |=
			(MSR_FP | new->thread.fpexc_mode);
	}
#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		do_load_up_transact_altivec(&new->thread);
		new->thread.regs->msr |= MSR_VEC;
	}
#endif
	/* We may as well turn on VSX too since all the state is restored now */
	if (msr & MSR_VSX)
		new->thread.regs->msr |= MSR_VSX;

	TM_DEBUG("*** tm_recheckpoint of pid %d complete "
		 "(kernel msr 0x%lx)\n",
		 new->pid, mfmsr());
}

static inline void __switch_to_tm(struct task_struct *prev)
{
	if (cpu_has_feature(CPU_FTR_TM)) {
		tm_enable();
		tm_reclaim_task(prev);
	}
}
#else
#define tm_recheckpoint_new_task(new)
#define __switch_to_tm(prev)
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
{
	struct thread_struct *new_thread, *old_thread;
	unsigned long flags;
	struct task_struct *last;
#ifdef CONFIG_PPC_BOOK3S_64
	struct ppc64_tlb_batch *batch;
#endif

	__switch_to_tm(prev);

#ifdef CONFIG_SMP
	/* avoid complexity of lazy save/restore of fpu
	 * by just saving it every time we switch out if
	 * this task used the fpu during the last quantum.
	 *
	 * If it tries to use the fpu again, it'll trap and
	 * reload its fp regs.  So we don't have to do a restore
	 * every switch, just a save.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
		giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
	/*
	 * If the previous thread used altivec in the last quantum
	 * (thus changing altivec regs) then save them.
	 * We used to check the VRSAVE register but not all apps
	 * set it, so we don't rely on it now (and in fact we need
	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
	 *
	 * On SMP we always save/restore altivec regs just to avoid the
	 * complexity of changing processors.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
		giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
		/* VMX and FPU registers are already saved here */
		__giveup_vsx(prev);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/*
	 * If the previous thread used spe in the last quantum
	 * (thus changing spe regs) then save them.
	 *
	 * On SMP we always save/restore spe regs just to avoid the
	 * complexity of changing processors.
	 */
	if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
		giveup_spe(prev);
#endif /* CONFIG_SPE */

#else  /* CONFIG_SMP */
#ifdef CONFIG_ALTIVEC
	/* Avoid the trap.  On SMP this never happens since
	 * we don't set last_task_used_altivec -- Cort
	 */
	if (new->thread.regs && last_task_used_altivec == new)
		new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (new->thread.regs && last_task_used_vsx == new)
		new->thread.regs->msr |= MSR_VSX;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* Avoid the trap.  On SMP this never happens since
	 * we don't set last_task_used_spe
	 */
	if (new->thread.regs && last_task_used_spe == new)
		new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */

#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	switch_booke_debug_regs(&new->thread);
#else
/*
 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
 * schedule DABR
 */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
	/* Only rewrite the breakpoint when it differs from what's installed */
	if (unlikely(!hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk)))
		set_breakpoint(&new->thread.hw_brk);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif


	new_thread = &new->thread;
	old_thread = &current->thread;

#ifdef CONFIG_PPC64
	/*
	 * Collect processor utilization data per process
	 */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array);
		long unsigned start_tb, current_tb;
		start_tb = old_thread->start_tb;
		cu->current_tb = current_tb = mfspr(SPRN_PURR);
		old_thread->accum_tb += (current_tb - start_tb);
		new_thread->start_tb = current_tb;
	}
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_BOOK3S_64
	batch = &__get_cpu_var(ppc64_tlb_batch);
	if (batch->active) {
		current_thread_info()->local_flags |= _TLF_LAZY_MMU;
		if (batch->index)
			__flush_tlb_pending(batch);
		batch->active = 0;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	local_irq_save(flags);

	/*
	 * We can't take a PMU exception inside _switch() since there is a
	 * window where the kernel stack SLB and the kernel stack are out
	 * of sync. Hard disable here.
	 */
	hard_irq_disable();

	tm_recheckpoint_new_task(new);

	last = _switch(old_thread, new_thread);

#ifdef CONFIG_PPC_BOOK3S_64
	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
		batch = &__get_cpu_var(ppc64_tlb_batch);
		batch->active = 1;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	local_irq_restore(flags);

	return last;
}

static int instructions_to_print = 16;

static void show_instructions(struct pt_regs *regs)
{
	int i;
	unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
			sizeof(int));
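	/*
	 * The dump window starts 3/4 of the way back: 12 of the 16
	 * instructions printed precede regs->nip, so the faulting
	 * instruction lands near the end of the dump.
	 */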

	printk("Instruction dump:");

	for (i = 0; i < instructions_to_print; i++) {
		int instr;

		if (!(i % 8))
			printk("\n");

#if !defined(CONFIG_BOOKE)
		/* If executing with the IMMU off, adjust pc rather
		 * than print XXXXXXXX.
		 */
		if (!(regs->msr & MSR_IR))
			pc = (unsigned long)phys_to_virt(pc);
#endif

		/* We use __get_user here *only* to avoid an OOPS on a
		 * bad address because the pc *should* only be a
		 * kernel address.
		 */
		if (!__kernel_text_address(pc) ||
		     __get_user(instr, (unsigned int __user *)pc)) {
			printk(KERN_CONT "XXXXXXXX ");
		} else {
			if (regs->nip == pc)
				printk(KERN_CONT "<%08x> ", instr);
			else
				printk(KERN_CONT "%08x ", instr);
		}

		pc += sizeof(int);
	}

	printk("\n");
}

static struct regbit {
	unsigned long bit;
	const char *name;
} msr_bits[] = {
#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
	{MSR_SF,	"SF"},
	{MSR_HV,	"HV"},
#endif
	{MSR_VEC,	"VEC"},
	{MSR_VSX,	"VSX"},
#ifdef CONFIG_BOOKE
	{MSR_CE,	"CE"},
#endif
	{MSR_EE,	"EE"},
	{MSR_PR,	"PR"},
	{MSR_FP,	"FP"},
	{MSR_ME,	"ME"},
#ifdef CONFIG_BOOKE
	{MSR_DE,	"DE"},
#else
	{MSR_SE,	"SE"},
	{MSR_BE,	"BE"},
#endif
	{MSR_IR,	"IR"},
	{MSR_DR,	"DR"},
	{MSR_PMM,	"PMM"},
#ifndef CONFIG_BOOKE
	{MSR_RI,	"RI"},
	{MSR_LE,	"LE"},
#endif
	{0,		NULL}
};

static void printbits(unsigned long val, struct regbit *bits)
{
	const char *sep = "";

	printk("<");
	for (; bits->bit; ++bits)
		if (val & bits->bit) {
			printk("%s%s", sep, bits->name);
			sep = ",";
		}
	printk(">");
}

#ifdef CONFIG_PPC64
#define REG		"%016lx"
#define REGS_PER_LINE	4
#define LAST_VOLATILE	13
#else
#define REG		"%08lx"
#define REGS_PER_LINE	8
#define LAST_VOLATILE	12
#endif

void show_regs(struct pt_regs * regs)
{
	int i, trap;

	show_regs_print_info(KERN_DEFAULT);

	printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
	       regs->nip, regs->link, regs->ctr);
	printk("REGS: %p TRAP: %04lx   %s  (%s)\n",
	       regs, regs->trap, print_tainted(), init_utsname()->release);
	printk("MSR: "REG" ", regs->msr);
	printbits(regs->msr, msr_bits);
	printk("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
#ifdef CONFIG_PPC64
	printk("SOFTE: %ld\n", regs->softe);
#endif
	trap = TRAP(regs);
	if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
		printk("CFAR: "REG"\n", regs->orig_gpr3);
	if (trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
		printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
#else
		printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
#endif

	for (i = 0;  i < 32;  i++) {
		if ((i % REGS_PER_LINE) == 0)
			printk("\nGPR%02d: ", i);
		printk(REG " ", regs->gpr[i]);
		if (i == LAST_VOLATILE && !FULL_REGS(regs))
			break;
	}
	printk("\n");
#ifdef CONFIG_KALLSYMS
	/*
	 * Look up NIP late so we have the best chance of getting the
	 * above info out without failing
	 */
	printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
	printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	printk("PACATMSCRATCH [%llx]\n", get_paca()->tm_scratch);
#endif
	show_stack(current, (unsigned long *) regs->gpr[1]);
	if (!user_mode(regs))
		show_instructions(regs);
}

void exit_thread(void)
{
	discard_lazy_cpu_state();
}

void flush_thread(void)
{
	discard_lazy_cpu_state();

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINT */
	set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}

void
release_thread(struct task_struct *t)
{
}

/*
 * This gets called so that we can store coprocessor state into memory and
 * copy the current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	flush_fp_to_thread(src);
	flush_altivec_to_thread(src);
	flush_vsx_to_thread(src);
	flush_spe_to_thread(src);
	*dst = *src;
	return 0;
}

/*
 * Copy a thread.
 */
extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */

int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long arg, struct task_struct *p)
{
	struct pt_regs *childregs, *kregs;
	extern void ret_from_fork(void);
	extern void ret_from_kernel_thread(void);
	void (*f)(void);
	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;

	/* Copy registers */
	sp -= sizeof(struct pt_regs);
	childregs = (struct pt_regs *) sp;
	if (unlikely(p->flags & PF_KTHREAD)) {
		struct thread_info *ti = (void *)task_stack_page(p);
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->gpr[1] = sp + sizeof(struct pt_regs);
		childregs->gpr[14] = usp;	/* function */
#ifdef CONFIG_PPC64
		clear_tsk_thread_flag(p, TIF_32BIT);
		childregs->softe = 1;
#endif
		childregs->gpr[15] = arg;
		p->thread.regs = NULL;	/* no user register state */
		ti->flags |= _TIF_RESTOREALL;
		f = ret_from_kernel_thread;
	} else {
		struct pt_regs *regs = current_pt_regs();
		CHECK_FULL_REGS(regs);
		*childregs = *regs;
		if (usp)
			childregs->gpr[1] = usp;
		p->thread.regs = childregs;
		childregs->gpr[3] = 0;  /* Result from fork() */
		if (clone_flags & CLONE_SETTLS) {
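			/*
			 * Background note: the 64-bit ABI keeps the thread
			 * pointer in r13 and the 32-bit ABI in r2; the new
			 * TLS value arrives as the clone() argument in r6.
			 */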
#ifdef CONFIG_PPC64
			if (!is_32bit_task())
				childregs->gpr[13] = childregs->gpr[6];
			else
#endif
				childregs->gpr[2] = childregs->gpr[6];
		}

		f = ret_from_fork;
	}
	sp -= STACK_FRAME_OVERHEAD;

	/*
	 * The way this works is that at some point in the future
	 * some task will call _switch to switch to the new task.
	 * That will pop off the stack frame created below and start
	 * the new task running at ret_from_fork.  The new task will
	 * do some housekeeping and then return from the fork or clone
	 * system call, using the stack frame created above.
	 */
	((unsigned long *)sp)[0] = 0;
	sp -= sizeof(struct pt_regs);
	kregs = (struct pt_regs *) sp;
	sp -= STACK_FRAME_OVERHEAD;
	p->thread.ksp = sp;
	p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
				_ALIGN_UP(sizeof(struct thread_info), 16);
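
	/*
	 * Sketch of the child's kernel stack as built above (addresses
	 * decreasing downwards):
	 *
	 *	task_stack_page(p) + THREAD_SIZE
	 *	+------------------------+
	 *	| struct pt_regs         |  childregs
	 *	+------------------------+
	 *	| STACK_FRAME_OVERHEAD   |  back chain word zeroed above
	 *	+------------------------+
	 *	| struct pt_regs         |  kregs (kregs->nip set below)
	 *	+------------------------+
	 *	| STACK_FRAME_OVERHEAD   |  <- p->thread.ksp
	 *	+------------------------+
	 */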

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	p->thread.ptrace_bps[0] = NULL;
#endif

#ifdef CONFIG_PPC_STD_MMU_64
	if (mmu_has_feature(MMU_FTR_SLB)) {
		unsigned long sp_vsid;
		unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

		if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
				<< SLB_VSID_SHIFT_1T;
		else
			sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
				<< SLB_VSID_SHIFT;
		sp_vsid |= SLB_VSID_KERNEL | llp;
		p->thread.ksp_vsid = sp_vsid;
	}
#endif /* CONFIG_PPC_STD_MMU_64 */
#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_DSCR)) {
		p->thread.dscr_inherit = current->thread.dscr_inherit;
		p->thread.dscr = current->thread.dscr;
	}
	if (cpu_has_feature(CPU_FTR_HAS_PPR))
		p->thread.ppr = INIT_PPR;
#endif
	/*
	 * The PPC64 ABI makes use of a TOC to contain function
	 * pointers.  The function (f) is actually a pointer
	 * to the TOC entry.  The first entry is a pointer to the actual
	 * function.
	 */
#ifdef CONFIG_PPC64
	kregs->nip = *((unsigned long *)f);
#else
	kregs->nip = (unsigned long)f;
#endif
	return 0;
}

/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */
#endif

	/*
	 * If we exec out of a kernel thread then thread.regs will not be
	 * set.  Do it now.
	 */
	if (!current->thread.regs) {
		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
		current->thread.regs = regs - 1;
	}

	memset(regs->gpr, 0, sizeof(regs->gpr));
	regs->ctr = 0;
	regs->link = 0;
	regs->xer = 0;
	regs->ccr = 0;
	regs->gpr[1] = sp;

	/*
	 * We have just cleared all the nonvolatile GPRs, so make
	 * FULL_REGS(regs) return true.  This is necessary to allow
	 * ptrace to examine the thread immediately after exec.
	 */
	regs->trap &= ~1UL;
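	/*
	 * (FULL_REGS tests the low bit of regs->trap: a set bit marks a
	 * frame where the non-volatile GPRs were not saved.)
	 */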

#ifdef CONFIG_PPC32
	regs->mq = 0;
	regs->nip = start;
	regs->msr = MSR_USER;
#else
	if (!is_32bit_task()) {
		unsigned long entry, toc;

		/* start is a relocated pointer to the function descriptor for
		 * the elf _start routine.  The first entry in the function
		 * descriptor is the entry address of _start and the second
		 * entry is the TOC value we need to use.
		 */
		__get_user(entry, (unsigned long __user *)start);
		__get_user(toc, (unsigned long __user *)start+1);

		/* Check whether the e_entry function descriptor entries
		 * need to be relocated before we can use them.
		 */
		if (load_addr != 0) {
			entry += load_addr;
			toc   += load_addr;
		}
		regs->nip = entry;
		regs->gpr[2] = toc;
		regs->msr = MSR_USER64;
	} else {
		regs->nip = start;
		regs->gpr[2] = 0;
		regs->msr = MSR_USER32;
	}
#endif
	discard_lazy_cpu_state();
#ifdef CONFIG_VSX
	current->thread.used_vsr = 0;
#endif
	memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
	current->thread.fpscr.val = 0;
#ifdef CONFIG_ALTIVEC
	memset(current->thread.vr, 0, sizeof(current->thread.vr));
	memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
	current->thread.vscr.u[3] = 0x00010000; /* Java mode disabled */
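	/*
	 * (0x00010000 in the last word sets VSCR[NJ], non-Java mode:
	 * vector FP denormals are flushed to zero for speed.)
	 */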
	current->thread.vrsave = 0;
	current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	memset(current->thread.evr, 0, sizeof(current->thread.evr));
	current->thread.acc = 0;
	current->thread.spefscr = 0;
	current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (cpu_has_feature(CPU_FTR_TM))
		regs->msr |= MSR_TM;
	current->thread.tm_tfhar = 0;
	current->thread.tm_texasr = 0;
	current->thread.tm_tfiar = 0;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
}

#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
		| PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	/* This is a bit hairy.  If we are an SPE enabled processor
	 * (have embedded fp) we store the IEEE exception enable flags in
	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
	 * mode (async, precise, disabled) for 'Classic' FP. */
	if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			tsk->thread.fpexc_mode = val &
				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
			return 0;
		} else {
			return -EINVAL;
		}
#else
		return -EINVAL;
#endif
	}

	/* On a CONFIG_SPE this does not hurt us.  The bits that
	 * __pack_fe01 uses do not overlap with bits used for
	 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
	 * on CONFIG_SPE implementations are reserved so writing to
	 * them does not change anything */
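	/*
	 * For reference, the MSR[FE0,FE1] encoding that __pack_fe01
	 * produces: 00 = exceptions disabled, 01 = imprecise
	 * nonrecoverable, 10 = imprecise recoverable, 11 = precise.
	 */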
	if (val > PR_FP_EXC_PRECISE)
		return -EINVAL;
	tsk->thread.fpexc_mode = __pack_fe01(val);
	if (regs != NULL && (regs->msr & MSR_FP) != 0)
		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
			| tsk->thread.fpexc_mode;
	return 0;
}

int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
	unsigned int val;

	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE))
			val = tsk->thread.fpexc_mode;
		else
			return -EINVAL;
#else
		return -EINVAL;
#endif
	else
		val = __unpack_fe01(tsk->thread.fpexc_mode);
	return put_user(val, (unsigned int __user *) adr);
}

int set_endian(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (val == PR_ENDIAN_BIG)
		regs->msr &= ~MSR_LE;
	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
		regs->msr |= MSR_LE;
	else
		return -EINVAL;

	return 0;
}

int get_endian(struct task_struct *tsk, unsigned long adr)
{
	struct pt_regs *regs = tsk->thread.regs;
	unsigned int val;

	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
	    !cpu_has_feature(CPU_FTR_REAL_LE))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (regs->msr & MSR_LE) {
		if (cpu_has_feature(CPU_FTR_REAL_LE))
			val = PR_ENDIAN_LITTLE;
		else
			val = PR_ENDIAN_PPC_LITTLE;
	} else
		val = PR_ENDIAN_BIG;

	return put_user(val, (unsigned int __user *)adr);
}

int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	tsk->thread.align_ctl = val;
	return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}

static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
				  unsigned long nbytes)
{
	unsigned long stack_page;
	unsigned long cpu = task_cpu(p);

	/*
	 * Avoid crashing if the stack has overflowed and corrupted
	 * task_cpu(p), which is in the thread_info struct.
	 */
	if (cpu < NR_CPUS && cpu_possible(cpu)) {
		stack_page = (unsigned long) hardirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;

		stack_page = (unsigned long) softirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;
	}
	return 0;
}

int validate_sp(unsigned long sp, struct task_struct *p,
		       unsigned long nbytes)
{
	unsigned long stack_page = (unsigned long)task_stack_page(p);

	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	return valid_irq_stack(sp, p, nbytes);
}

EXPORT_SYMBOL(validate_sp);

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.ksp;
	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
		return 0;

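	/*
	 * Walk the back chain: per the PowerPC ABI, the word at the stack
	 * pointer is the caller's stack pointer, and the saved LR (the
	 * return address) sits at offset STACK_FRAME_LR_SAVE in each frame.
	 */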
	do {
		sp = *(unsigned long *)sp;
		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
			return 0;
		if (count > 0) {
			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
			if (!in_sched_functions(ip))
				return ip;
		}
	} while (count++ < 16);
	return 0;
}

static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

void show_stack(struct task_struct *tsk, unsigned long *stack)
{
	unsigned long sp, ip, lr, newsp;
	int count = 0;
	int firstframe = 1;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int curr_frame = current->curr_ret_stack;
	extern void return_to_handler(void);
	unsigned long rth = (unsigned long)return_to_handler;
	unsigned long mrth = -1;
#ifdef CONFIG_PPC64
	extern void mod_return_to_handler(void);
	rth = *(unsigned long *)rth;
	mrth = (unsigned long)mod_return_to_handler;
	mrth = *(unsigned long *)mrth;
#endif
#endif

	sp = (unsigned long) stack;
	if (tsk == NULL)
		tsk = current;
	if (sp == 0) {
		if (tsk == current)
			asm("mr %0,1" : "=r" (sp));
		else
			sp = tsk->thread.ksp;
	}

	lr = 0;
	printk("Call Trace:\n");
	do {
		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			return;

		stack = (unsigned long *) sp;
		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!firstframe || ip != lr) {
			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
			if ((ip == rth || ip == mrth) && curr_frame >= 0) {
				printk(" (%pS)",
				       (void *)current->ret_stack[curr_frame].ret);
				curr_frame--;
			}
#endif
			if (firstframe)
				printk(" (unreliable)");
			printk("\n");
		}
		firstframe = 0;

		/*
		 * See if this is an exception frame.
		 * We look for the "regshere" marker in the current frame.
		 */
		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			struct pt_regs *regs = (struct pt_regs *)
				(sp + STACK_FRAME_OVERHEAD);
			lr = regs->link;
			printk("--- Exception: %lx at %pS\n    LR = %pS\n",
			       regs->trap, (void *)regs->nip, (void *)lr);
			firstframe = 1;
		}

		sp = newsp;
	} while (count++ < kstack_depth_to_print);
}

#ifdef CONFIG_PPC64
/* Called with hard IRQs off */
void notrace __ppc64_runlatch_on(void)
{
	struct thread_info *ti = current_thread_info();
	unsigned long ctrl;

	ctrl = mfspr(SPRN_CTRLF);
	ctrl |= CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);

	ti->local_flags |= _TLF_RUNLATCH;
}

/* Called with hard IRQs off */
void notrace __ppc64_runlatch_off(void)
{
	struct thread_info *ti = current_thread_info();
	unsigned long ctrl;

	ti->local_flags &= ~_TLF_RUNLATCH;

	ctrl = mfspr(SPRN_CTRLF);
	ctrl &= ~CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);
}
#endif /* CONFIG_PPC64 */

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}

static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = 0;

	/* 8MB for 32bit, 1GB for 64bit */
	if (is_32bit_task())
		rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
	else
		rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));

	return rnd << PAGE_SHIFT;
}
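
/*
 * Worked example (illustrative, assuming PAGE_SHIFT = 12): for a
 * 32-bit task, brk_rnd() picks a page count in [0, 2^11) and shifts
 * it up by 12, giving a page-aligned offset below 2^23 bytes = 8MB;
 * the 64-bit case is the same with 2^30 bytes = 1GB.
 */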

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret;

#ifdef CONFIG_PPC_STD_MMU_64
	/*
	 * If we are using 1TB segments and we are allowed to randomise
	 * the heap, we can put it above 1TB so it is backed by a 1TB
	 * segment. Otherwise the heap will be in the bottom 1TB
	 * which always uses 256MB segments and this may result in a
	 * performance penalty.
	 */
	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
#endif

	ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < mm->brk)
		return mm->brk;

	return ret;
}

unsigned long randomize_et_dyn(unsigned long base)
{
	unsigned long ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < base)
		return base;

	return ret;
}