// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright (C) 2001 IBM
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 *
 *  Derived from "arch/i386/kernel/signal.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 *    1997-11-28  Modified for POSIX.1b signals by Richard Henderson
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/elf.h>
#include <linux/ptrace.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#ifdef CONFIG_PPC64
#include <linux/compat.h>
#else
#include <linux/wait.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#endif

#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/syscalls.h>
#include <asm/sigcontext.h>
#include <asm/vdso.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/asm-prototypes.h>
#ifdef CONFIG_PPC64
#include "ppc32.h"
#include <asm/unistd.h>
#else
#include <asm/ucontext.h>
#endif

#include "signal.h"


#ifdef CONFIG_PPC64
#define old_sigaction	old_sigaction32
#define sigcontext	sigcontext32
#define mcontext	mcontext32
#define ucontext	ucontext32

#define __save_altstack __compat_save_altstack

/*
 * Userspace code may pass a ucontext which doesn't include VSX added
 * at the end.  We need to check for this case.
 */
#define UCONTEXTSIZEWITHOUTVSX \
		(sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))

/*
 * Returning 0 means we return to userspace via
 * ret_from_except and thus restore all user
 * registers from *regs.  This is what we need
 * to do when a signal has been delivered.
 */

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
#undef __SIGNAL_FRAMESIZE
#define __SIGNAL_FRAMESIZE	__SIGNAL_FRAMESIZE32
#undef ELF_NVRREG
#define ELF_NVRREG	ELF_NVRREG32

/*
 * Functions for flipping sigsets (thanks to brain dead generic
 * implementation that makes things simple for little endian only)
 */
static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
{
	return put_compat_sigset(uset, set, sizeof(*uset));
}

static inline int get_sigset_t(sigset_t *set,
			       const compat_sigset_t __user *uset)
{
	return get_compat_sigset(set, uset);
}

#define to_user_ptr(p)		ptr_to_compat(p)
#define from_user_ptr(p)	compat_ptr(p)

static inline int save_general_regs(struct pt_regs *regs,
		struct mcontext __user *frame)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int val, i;

	WARN_ON(!FULL_REGS(regs));

	for (i = 0; i <= PT_RESULT; i++) {
		/* Force the user to always see softe as 1 (interrupts enabled) */
		if (i == PT_SOFTE)
			val = 1;
		else
			val = gregs[i];

		if (__put_user(val, &frame->mc_gregs[i]))
			return -EFAULT;
	}
	return 0;
}

static inline int restore_general_regs(struct pt_regs *regs,
		struct mcontext __user *sr)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;

	for (i = 0; i <= PT_RESULT; i++) {
		if ((i == PT_MSR) || (i == PT_SOFTE))
			continue;
		if (__get_user(gregs[i], &sr->mc_gregs[i]))
			return -EFAULT;
	}
	return 0;
}

#else /* CONFIG_PPC64 */

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t), sizeof(struct pt_regs))

static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
{
	return copy_to_user(uset, set, sizeof(*uset));
}

static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
{
	return copy_from_user(set, uset, sizeof(*uset));
}

#define to_user_ptr(p)		((unsigned long)(p))
#define from_user_ptr(p)	((void __user *)(p))

static inline int save_general_regs(struct pt_regs *regs,
		struct mcontext __user *frame)
{
	WARN_ON(!FULL_REGS(regs));
	return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
}

static inline int restore_general_regs(struct pt_regs *regs,
		struct mcontext __user *sr)
{
	/* copy up to but not including MSR */
	if (__copy_from_user(regs, &sr->mc_gregs,
				PT_MSR * sizeof(elf_greg_t)))
		return -EFAULT;
	/* copy from orig_r3 (the word after the MSR) up to the end */
	if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
				GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
		return -EFAULT;
	return 0;
}
#endif
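
/*
 * Note that both restore_general_regs() variants above deliberately skip
 * PT_MSR: the MSR is never taken wholesale from the user frame.  On
 * 64-bit the loop skips PT_MSR and PT_SOFTE explicitly; on 32-bit the
 * same effect comes from the split copy, since PT_ORIG_R3 is the word
 * immediately after PT_MSR.  Callers fetch the saved MSR separately and
 * fold in only the bits they trust (e.g. MSR_LE).
 */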

/*
 * When we have signals to deliver, we set up on the
 * user stack, going down from the original stack pointer:
 *	an ABI gap of 56 words
 *	an mcontext struct
 *	a sigcontext struct
 *	a gap of __SIGNAL_FRAMESIZE bytes
 *
 * Each of these things must be a multiple of 16 bytes in size. The following
 * structure represents all of this except the __SIGNAL_FRAMESIZE gap
 *
 */
struct sigframe {
	struct sigcontext sctx;		/* the sigcontext */
	struct mcontext	mctx;		/* all the register values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct sigcontext sctx_transact;
	struct mcontext	mctx_transact;
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};
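
/*
 * A rough sketch of the resulting user stack for a non-RT signal
 * (addresses decrease downwards; the *_transact members exist only
 * under CONFIG_PPC_TRANSACTIONAL_MEM):
 *
 *	<- original r1
 *	abigap (56 words)
 *	mctx_transact / sctx_transact (TM only)
 *	mctx
 *	sctx	<- start of struct sigframe
 *	gap of __SIGNAL_FRAMESIZE bytes
 *	<- new r1, whose back chain points at the original r1
 */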

/* We use the mc_pad field for the signal return trampoline. */
#define tramp	mc_pad

/*
 *  When we have rt signals to deliver, we set up on the
 *  user stack, going down from the original stack pointer:
 *	one rt_sigframe struct (siginfo + ucontext + ABI gap)
 *	a gap of __SIGNAL_FRAMESIZE+16 bytes
 *  (the +16 is to get the siginfo and ucontext in the same
 *  positions as in older kernels).
 *
 *  Each of these things must be a multiple of 16 bytes in size.
 *
 */
struct rt_sigframe {
#ifdef CONFIG_PPC64
	compat_siginfo_t info;
#else
	struct siginfo info;
#endif
	struct ucontext	uc;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext	uc_transact;
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};
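
/*
 * The RT layout is analogous: the siginfo sits at the lowest address of
 * the frame, followed by the ucontext (and the transactional ucontext
 * under TM), with the ABI gap on top.  The stack pointer is dropped a
 * further __SIGNAL_FRAMESIZE + 16 bytes below the frame before the
 * handler runs.
 */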

/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 */
static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
			  struct mcontext __user *tm_frame, int sigret,
			  int ctx_has_vsx_region)
{
	unsigned long msr = regs->msr;

	/* Make sure floating point registers are stored in regs */
	flush_fp_to_thread(current);

	/* save general registers */
	if (save_general_regs(regs, frame))
		return 1;

#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (current->thread.used_vr) {
		flush_altivec_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
				   ELF_NVRREG * sizeof(vector128)))
			return 1;
		/* set MSR_VEC in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_VEC;
	}
	/* else assert((regs->msr & MSR_VEC) == 0) */

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 * Note that the current VRSAVE value is in the SPR at this point.
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.vrsave = mfspr(SPRN_VRSAVE);
	if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
		return 1;
#endif /* CONFIG_ALTIVEC */
	if (copy_fpr_to_user(&frame->mc_fregs, current))
		return 1;

	/*
	 * Clear the MSR VSX bit to indicate there is no valid state attached
	 * to this context, except in the specific case below where we set it.
	 */
	msr &= ~MSR_VSX;
#ifdef CONFIG_VSX
	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace.  Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr && ctx_has_vsx_region) {
		flush_vsx_to_thread(current);
		if (copy_vsx_to_user(&frame->mc_vsregs, current))
			return 1;
		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* save spe registers */
	if (current->thread.used_spe) {
		flush_spe_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
				   ELF_NEVRREG * sizeof(u32)))
			return 1;
		/* set MSR_SPE in the saved MSR value to indicate that
		   frame->mc_vregs contains valid data */
		msr |= MSR_SPE;
	}
	/* else assert((regs->msr & MSR_SPE) == 0) */

	/* We always copy to/from spefscr */
	if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
		return 1;
	/* We need to write 0 to the MSR top 32 bits in the tm frame so that we
	 * can check it on the restore to see if TM is active
	 */
	if (tm_frame && __put_user(0, &tm_frame->mc_gregs[PT_MSR]))
		return 1;

	if (sigret) {
		/* Set up the sigreturn trampoline: li 0,sigret; sc */
		if (__put_user(PPC_INST_ADDI + sigret, &frame->tramp[0])
		    || __put_user(PPC_INST_SC, &frame->tramp[1]))
			return 1;
		flush_icache_range((unsigned long) &frame->tramp[0],
				   (unsigned long) &frame->tramp[2]);
	}

	return 0;
}
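
/*
 * The MSR word that save_user_regs() writes out doubles as a bitmap of
 * which optional regions of the mcontext hold valid data: MSR_VEC,
 * MSR_VSX and MSR_SPE are set only when the corresponding registers were
 * actually copied out above.  restore_user_regs() keys off the same bits
 * when deciding what to pull back in.
 */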

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 * We also save the transactional registers to a second ucontext in the
 * frame.
 *
 * See save_user_regs() and signal_64.c:setup_tm_sigcontexts().
 */
static int save_tm_user_regs(struct pt_regs *regs,
			     struct mcontext __user *frame,
			     struct mcontext __user *tm_frame, int sigret,
			     unsigned long msr)
{
	WARN_ON(tm_suspend_disabled);

	/* Save both sets of general registers */
	if (save_general_regs(&current->thread.ckpt_regs, frame)
	    || save_general_regs(regs, tm_frame))
		return 1;

	/* Stash the top half of the 64bit MSR into the 32bit MSR word
	 * of the transactional mcontext.  This way we have a backward-compatible
	 * MSR in the 'normal' (checkpointed) mcontext and additionally one can
	 * also look at what type of transaction (T or S) was active at the
	 * time of the signal.
	 */
	if (__put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR]))
		return 1;

#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (current->thread.used_vr) {
		if (__copy_to_user(&frame->mc_vregs, &current->thread.ckvr_state,
				   ELF_NVRREG * sizeof(vector128)))
			return 1;
		if (msr & MSR_VEC) {
			if (__copy_to_user(&tm_frame->mc_vregs,
					   &current->thread.vr_state,
					   ELF_NVRREG * sizeof(vector128)))
				return 1;
		} else {
			if (__copy_to_user(&tm_frame->mc_vregs,
					   &current->thread.ckvr_state,
					   ELF_NVRREG * sizeof(vector128)))
				return 1;
		}

		/* set MSR_VEC in the saved MSR value to indicate that
		 * frame->mc_vregs contains valid data
		 */
		msr |= MSR_VEC;
	}

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
	if (__put_user(current->thread.ckvrsave,
		       (u32 __user *)&frame->mc_vregs[32]))
		return 1;
	if (msr & MSR_VEC) {
		if (__put_user(current->thread.vrsave,
			       (u32 __user *)&tm_frame->mc_vregs[32]))
			return 1;
	} else {
		if (__put_user(current->thread.ckvrsave,
			       (u32 __user *)&tm_frame->mc_vregs[32]))
			return 1;
	}
#endif /* CONFIG_ALTIVEC */

	if (copy_ckfpr_to_user(&frame->mc_fregs, current))
		return 1;
	if (msr & MSR_FP) {
		if (copy_fpr_to_user(&tm_frame->mc_fregs, current))
			return 1;
	} else {
		if (copy_ckfpr_to_user(&tm_frame->mc_fregs, current))
			return 1;
	}

#ifdef CONFIG_VSX
	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace.  Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr) {
		if (copy_ckvsx_to_user(&frame->mc_vsregs, current))
			return 1;
		if (msr & MSR_VSX) {
			if (copy_vsx_to_user(&tm_frame->mc_vsregs, current))
				return 1;
		} else {
			if (copy_ckvsx_to_user(&tm_frame->mc_vsregs, current))
				return 1;
		}

		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* SPE regs are not checkpointed with TM, so this section is
	 * simply the same as in save_user_regs().
	 */
	if (current->thread.used_spe) {
		flush_spe_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
				   ELF_NEVRREG * sizeof(u32)))
			return 1;
		/* set MSR_SPE in the saved MSR value to indicate that
		 * frame->mc_vregs contains valid data */
		msr |= MSR_SPE;
	}

	/* We always copy to/from spefscr */
	if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
		return 1;
	if (sigret) {
		/* Set up the sigreturn trampoline: li 0,sigret; sc */
		if (__put_user(PPC_INST_ADDI + sigret, &frame->tramp[0])
		    || __put_user(PPC_INST_SC, &frame->tramp[1]))
			return 1;
		flush_icache_range((unsigned long) &frame->tramp[0],
				   (unsigned long) &frame->tramp[2]);
	}

	return 0;
}
#endif

/*
 * Restore the current user register values from the user stack,
 * (except for MSR).
 */
static long restore_user_regs(struct pt_regs *regs,
			      struct mcontext __user *sr, int sig)
{
	long err;
	unsigned int save_r2 = 0;
	unsigned long msr;
#ifdef CONFIG_VSX
	int i;
#endif

	/*
	 * restore general registers but not MSR or SOFTE. Also
	 * take care of keeping r2 (TLS) intact if not a signal
	 */
	if (!sig)
		save_r2 = (unsigned int)regs->gpr[2];
	err = restore_general_regs(regs, sr);
	set_trap_norestart(regs);
	err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
	if (!sig)
		regs->gpr[2] = (unsigned long) save_r2;
	if (err)
		return 1;

	/* if doing signal return, restore the previous little-endian mode */
	if (sig)
		regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

#ifdef CONFIG_ALTIVEC
	/*
	 * Force the process to reload the altivec registers from
	 * current->thread when it next does altivec instructions
	 */
	regs->msr &= ~MSR_VEC;
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack */
		if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
				     sizeof(sr->mc_vregs)))
			return 1;
		current->thread.used_vr = true;
	} else if (current->thread.used_vr)
		memset(&current->thread.vr_state, 0,
		       ELF_NVRREG * sizeof(vector128));

	/* Always get VRSAVE back */
	if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
		return 1;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
	if (copy_fpr_from_user(current, &sr->mc_fregs))
		return 1;

#ifdef CONFIG_VSX
	/*
	 * Force the process to reload the VSX registers from
	 * current->thread when it next does VSX instructions.
	 */
	regs->msr &= ~MSR_VSX;
	if (msr & MSR_VSX) {
		/*
		 * Restore VSX registers from the stack to a local
		 * buffer, then write this out to the thread_struct
		 */
		if (copy_vsx_from_user(current, &sr->mc_vsregs))
			return 1;
		current->thread.used_vsr = true;
	} else if (current->thread.used_vsr)
		for (i = 0; i < 32 ; i++)
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
#endif /* CONFIG_VSX */
	/*
	 * force the process to reload the FP registers from
	 * current->thread when it next does FP instructions
	 */
	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);

#ifdef CONFIG_SPE
	/* force the process to reload the spe registers from
	   current->thread when it next does spe instructions */
	regs->msr &= ~MSR_SPE;
	if (msr & MSR_SPE) {
		/* restore spe registers from the stack */
		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
				     ELF_NEVRREG * sizeof(u32)))
			return 1;
		current->thread.used_spe = true;
	} else if (current->thread.used_spe)
		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));

	/* Always get SPEFSCR back */
	if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Restore the current user register values from the user stack, except for
 * MSR, and recheckpoint the original checkpointed register state for processes
 * in transactions.
 */
static long restore_tm_user_regs(struct pt_regs *regs,
				 struct mcontext __user *sr,
				 struct mcontext __user *tm_sr)
{
	long err;
	unsigned long msr, msr_hi;
#ifdef CONFIG_VSX
	int i;
#endif

	if (tm_suspend_disabled)
		return 1;
	/*
	 * restore general registers but not MSR or SOFTE. Also
	 * take care of keeping r2 (TLS) intact if not a signal.
	 * See comment in signal_64.c:restore_tm_sigcontexts();
	 * TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR
	 * were set by the signal delivery.
	 */
	err = restore_general_regs(regs, tm_sr);
	err |= restore_general_regs(&current->thread.ckpt_regs, sr);

	err |= __get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP]);

	err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
	if (err)
		return 1;

	/* Restore the previous little-endian mode */
	regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

#ifdef CONFIG_ALTIVEC
	regs->msr &= ~MSR_VEC;
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack */
		if (__copy_from_user(&current->thread.ckvr_state, &sr->mc_vregs,
				     sizeof(sr->mc_vregs)) ||
		    __copy_from_user(&current->thread.vr_state,
				     &tm_sr->mc_vregs,
				     sizeof(sr->mc_vregs)))
			return 1;
		current->thread.used_vr = true;
	} else if (current->thread.used_vr) {
		memset(&current->thread.vr_state, 0,
		       ELF_NVRREG * sizeof(vector128));
		memset(&current->thread.ckvr_state, 0,
		       ELF_NVRREG * sizeof(vector128));
	}

	/* Always get VRSAVE back */
	if (__get_user(current->thread.ckvrsave,
		       (u32 __user *)&sr->mc_vregs[32]) ||
	    __get_user(current->thread.vrsave,
		       (u32 __user *)&tm_sr->mc_vregs[32]))
		return 1;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.ckvrsave);
#endif /* CONFIG_ALTIVEC */

	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);

	if (copy_fpr_from_user(current, &sr->mc_fregs) ||
	    copy_ckfpr_from_user(current, &tm_sr->mc_fregs))
		return 1;

#ifdef CONFIG_VSX
	regs->msr &= ~MSR_VSX;
	if (msr & MSR_VSX) {
		/*
		 * Restore VSX registers from the stack to a local
		 * buffer, then write this out to the thread_struct
		 */
		if (copy_vsx_from_user(current, &tm_sr->mc_vsregs) ||
		    copy_ckvsx_from_user(current, &sr->mc_vsregs))
			return 1;
		current->thread.used_vsr = true;
	} else if (current->thread.used_vsr)
		for (i = 0; i < 32 ; i++) {
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
			current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
		}
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE
	/* SPE regs are not checkpointed with TM, so this section is
	 * simply the same as in restore_user_regs().
	 */
	regs->msr &= ~MSR_SPE;
	if (msr & MSR_SPE) {
		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
				     ELF_NEVRREG * sizeof(u32)))
			return 1;
		current->thread.used_spe = true;
	} else if (current->thread.used_spe)
		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));

	/* Always get SPEFSCR back */
	if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs
		       + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	/* Get the top half of the MSR from the user context */
	if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
		return 1;
	msr_hi <<= 32;
	/* If TM bits are set to the reserved value, it's an invalid context */
	if (MSR_TM_RESV(msr_hi))
		return 1;

	/*
	 * Disabling preemption, since it is unsafe to be preempted
	 * with MSR[TS] set without recheckpointing.
	 */
	preempt_disable();

	/*
	 * CAUTION:
	 * After regs->MSR[TS] is updated, make sure that get_user(),
	 * put_user() or similar functions are *not* called. These
	 * functions can generate page faults which will cause the process
	 * to be de-scheduled with MSR[TS] set but without calling
	 * tm_recheckpoint(). This can cause a bug.
	 *
	 * Pull in the MSR TM bits from the user context
	 */
	regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
	/* Now, recheckpoint.  This loads up all of the checkpointed (older)
	 * registers, including FP and V[S]Rs.  After recheckpointing, the
	 * transactional versions should be loaded.
	 */
	tm_enable();
	/* Make sure the transaction is marked as failed */
	current->thread.tm_texasr |= TEXASR_FS;
	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&current->thread);

	/* This loads the speculative FP/VEC state, if used */
	msr_check_and_set(msr & (MSR_FP | MSR_VEC));
	if (msr & MSR_FP) {
		load_fp_state(&current->thread.fp_state);
		regs->msr |= (MSR_FP | current->thread.fpexc_mode);
	}
#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		load_vr_state(&current->thread.vr_state);
		regs->msr |= MSR_VEC;
	}
#endif

	preempt_enable();

	return 0;
}
#endif

#ifdef CONFIG_PPC64

#define copy_siginfo_to_user	copy_siginfo_to_user32

#endif /* CONFIG_PPC64 */

/*
 * Set up a signal frame for a "real-time" signal handler
 * (one which gets siginfo).
 */
int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
		       struct task_struct *tsk)
{
	struct rt_sigframe __user *rt_sf;
	struct mcontext __user *frame;
	struct mcontext __user *tm_frame = NULL;
	void __user *addr;
	unsigned long newsp = 0;
	int sigret;
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Save the thread's msr before get_tm_stackpointer() changes it */
	unsigned long msr = regs->msr;
#endif

	BUG_ON(tsk != current);

	/* Set up Signal Frame */
	/* Put a Real Time Context onto stack */
	rt_sf = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*rt_sf), 1);
	addr = rt_sf;
	if (unlikely(rt_sf == NULL))
		goto badframe;

	/* Put the siginfo & fill in most of the ucontext */
	if (copy_siginfo_to_user(&rt_sf->info, &ksig->info)
	    || __put_user(0, &rt_sf->uc.uc_flags)
	    || __save_altstack(&rt_sf->uc.uc_stack, regs->gpr[1])
	    || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
		    &rt_sf->uc.uc_regs)
	    || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
		goto badframe;

	/* Save user registers on the stack */
	frame = &rt_sf->uc.uc_mcontext;
	addr = frame;
	if (vdso32_rt_sigtramp && tsk->mm->context.vdso_base) {
		sigret = 0;
		tramp = tsk->mm->context.vdso_base + vdso32_rt_sigtramp;
	} else {
		sigret = __NR_rt_sigreturn;
		tramp = (unsigned long) frame->tramp;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_frame = &rt_sf->uc_transact.uc_mcontext;
	if (MSR_TM_ACTIVE(msr)) {
		if (__put_user((unsigned long)&rt_sf->uc_transact,
			       &rt_sf->uc.uc_link) ||
		    __put_user((unsigned long)tm_frame,
			       &rt_sf->uc_transact.uc_regs))
			goto badframe;
		if (save_tm_user_regs(regs, frame, tm_frame, sigret, msr))
			goto badframe;
	}
	else
#endif
	{
		if (__put_user(0, &rt_sf->uc.uc_link))
			goto badframe;
		if (save_user_regs(regs, frame, tm_frame, sigret, 1))
			goto badframe;
	}
	regs->link = tramp;

	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
	addr = (void __user *)regs->gpr[1];
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	/* Fill registers for signal handler */
	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->gpr[4] = (unsigned long) &rt_sf->info;
	regs->gpr[5] = (unsigned long) &rt_sf->uc;
	regs->gpr[6] = (unsigned long) rt_sf;
	regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
	/* enter the signal handler in native-endian mode */
	regs->msr &= ~MSR_LE;
	regs->msr |= (MSR_KERNEL & MSR_LE);
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in handle_rt_signal32: "
				   "%p nip %08lx lr %08lx\n",
				   tsk->comm, tsk->pid,
				   addr, regs->nip, regs->link);

	return 1;
}
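
/*
 * On success the task resumes in userspace at the handler with the
 * standard rt arguments already in place: r1 = newsp (back-chained to
 * the old stack), r3 = signal number, r4 = &rt_sf->info,
 * r5 = &rt_sf->uc, and LR pointing at the sigreturn trampoline (the
 * VDSO one if available, otherwise the li/sc pair written into the
 * frame).
 */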

static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
{
	sigset_t set;
	struct mcontext __user *mcp;

	if (get_sigset_t(&set, &ucp->uc_sigmask))
		return -EFAULT;
#ifdef CONFIG_PPC64
	{
		u32 cmcp;

		if (__get_user(cmcp, &ucp->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* no need to check access_ok(mcp), since mcp < 4GB */
	}
#else
	if (__get_user(mcp, &ucp->uc_regs))
		return -EFAULT;
	if (!access_ok(mcp, sizeof(*mcp)))
		return -EFAULT;
#endif
	set_current_blocked(&set);
	if (restore_user_regs(regs, mcp, sig))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static int do_setcontext_tm(struct ucontext __user *ucp,
			    struct ucontext __user *tm_ucp,
			    struct pt_regs *regs)
{
	sigset_t set;
	struct mcontext __user *mcp;
	struct mcontext __user *tm_mcp;
	u32 cmcp;
	u32 tm_cmcp;

	if (get_sigset_t(&set, &ucp->uc_sigmask))
		return -EFAULT;

	if (__get_user(cmcp, &ucp->uc_regs) ||
	    __get_user(tm_cmcp, &tm_ucp->uc_regs))
		return -EFAULT;
	mcp = (struct mcontext __user *)(u64)cmcp;
	tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;
	/* no need to check access_ok(mcp), since mcp < 4GB */

	set_current_blocked(&set);
	if (restore_tm_user_regs(regs, mcp, tm_mcp))
		return -EFAULT;

	return 0;
}
#endif

#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
		       struct ucontext __user *, new_ctx, int, ctx_size)
#else
SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
		       struct ucontext __user *, new_ctx, long, ctx_size)
#endif
{
	struct pt_regs *regs = current_pt_regs();
	int ctx_has_vsx_region = 0;

#ifdef CONFIG_PPC64
	unsigned long new_msr = 0;

	if (new_ctx) {
		struct mcontext __user *mcp;
		u32 cmcp;

		/*
		 * Get pointer to the real mcontext.  No need for
		 * access_ok since we are dealing with compat
		 * pointers.
		 */
		if (__get_user(cmcp, &new_ctx->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
			return -EFAULT;
	}
	/*
	 * Check that the context is not smaller than the original
	 * size (with VMX but without VSX)
	 */
	if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
		return -EINVAL;
	/*
	 * Reject the new context if it sets the MSR VSX bits but
	 * doesn't provide room for the VSX state.
	 */
	if ((ctx_size < sizeof(struct ucontext)) &&
	    (new_msr & MSR_VSX))
		return -EINVAL;
	/* Does the context have enough room to store VSX data? */
	if (ctx_size >= sizeof(struct ucontext))
		ctx_has_vsx_region = 1;
#else
	/* Context size is for future use. Right now, we only make sure
	 * we are passed something we understand
	 */
	if (ctx_size < sizeof(struct ucontext))
		return -EINVAL;
#endif
	if (old_ctx != NULL) {
		struct mcontext __user *mctx;

		/*
		 * old_ctx might not be 16-byte aligned, in which
		 * case old_ctx->uc_mcontext won't be either.
		 * Because we have the old_ctx->uc_pad2 field
		 * before old_ctx->uc_mcontext, we need to round down
		 * from &old_ctx->uc_mcontext to a 16-byte boundary.
		 */
		mctx = (struct mcontext __user *)
			((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
		if (!access_ok(old_ctx, ctx_size)
		    || save_user_regs(regs, mctx, NULL, 0, ctx_has_vsx_region)
		    || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
		    || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
			return -EFAULT;
	}
	if (new_ctx == NULL)
		return 0;
	if (!access_ok(new_ctx, ctx_size) ||
	    fault_in_pages_readable((u8 __user *)new_ctx, ctx_size))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(new_ctx, regs, 0))
		do_exit(SIGSEGV);

	set_thread_flag(TIF_RESTOREALL);
	return 0;
}
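
/*
 * A hypothetical userspace sketch (not part of this file) of how a
 * 32-bit task might use this syscall; the __NR_swapcontext constant is
 * assumed from the powerpc ABI.  Passing the full ucontext size tells
 * the kernel the context has room for VSX state:
 *
 *	struct ucontext old_ctx, new_ctx;	// new_ctx previously filled in
 *	syscall(__NR_swapcontext, &old_ctx, &new_ctx,
 *		sizeof(struct ucontext));
 *
 * A ctx_size below UCONTEXTSIZEWITHOUTVSX is rejected with -EINVAL, as
 * is a VSX-less size when the new context's MSR has MSR_VSX set.
 */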

#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
#else
SYSCALL_DEFINE0(rt_sigreturn)
#endif
{
	struct rt_sigframe __user *rt_sf;
	struct pt_regs *regs = current_pt_regs();
	int tm_restore = 0;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext __user *uc_transact;
	unsigned long msr_hi;
	unsigned long tmp;
#endif
	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	rt_sf = (struct rt_sigframe __user *)
		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
	if (!access_ok(rt_sf, sizeof(*rt_sf)))
		goto bad;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * If there is a transactional state then throw it away.
	 * The purpose of a sigreturn is to destroy all traces of the
	 * signal frame, and that includes any transactional state created
	 * within it. We only check for suspended because we can never be
	 * active in the kernel; if we somehow are active, there is nothing
	 * better to do than go ahead and hit the Bad Thing later.
	 * The cause is not important as there will never be a
	 * recheckpoint so it's not user visible.
	 */
	if (MSR_TM_SUSPENDED(mfmsr()))
		tm_reclaim_current(0);

	if (__get_user(tmp, &rt_sf->uc.uc_link))
		goto bad;
	uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
	if (uc_transact) {
		u32 cmcp;
		struct mcontext __user *mcp;

		if (__get_user(cmcp, &uc_transact->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* The top 32 bits of the MSR are stashed in the transactional
		 * ucontext. */
		if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
			goto bad;

		if (MSR_TM_ACTIVE(msr_hi<<32)) {
			/* Trying to start TM on non TM system */
			if (!cpu_has_feature(CPU_FTR_TM))
				goto bad;
			/* We only recheckpoint on return if we're in a
			 * transaction.
			 */
			tm_restore = 1;
			if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
				goto bad;
		}
	}
	if (!tm_restore) {
		/*
		 * Unset regs->msr because ucontext MSR TS is not
		 * set, and recheckpoint was not called. This avoids
		 * hitting a TM Bad Thing at RFID
		 */
		regs->msr &= ~MSR_TS_MASK;
	}
	/* Fall through, for non-TM restore */
#endif
	if (!tm_restore)
		if (do_setcontext(&rt_sf->uc, regs, 1))
			goto bad;

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
#ifdef CONFIG_PPC64
	if (compat_restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#else
	if (restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#endif
	set_thread_flag(TIF_RESTOREALL);
	return 0;

 bad:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in sys_rt_sigreturn: "
				   "%p nip %08lx lr %08lx\n",
				   current->comm, current->pid,
				   rt_sf, regs->nip, regs->link);

	force_sig(SIGSEGV);
	return 0;
}
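
/*
 * sys_rt_sigreturn above is reached when the handler returns: LR was
 * pointed at the sigreturn trampoline by handle_rt_signal32(), and the
 * trampoline's "li r0,__NR_rt_sigreturn; sc" enters the kernel with r1
 * back at newsp (assuming the handler restored its stack frame), which
 * is why rt_sf is recovered as gpr[1] + __SIGNAL_FRAMESIZE + 16.
 */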

#ifdef CONFIG_PPC32
SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
			 int, ndbg, struct sig_dbg_op __user *, dbg)
{
	struct pt_regs *regs = current_pt_regs();
	struct sig_dbg_op op;
	int i;
	unsigned long new_msr = regs->msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long new_dbcr0 = current->thread.debug.dbcr0;
#endif

	for (i = 0; i < ndbg; i++) {
		if (copy_from_user(&op, dbg + i, sizeof(op)))
			return -EFAULT;
		switch (op.dbg_type) {
		case SIG_DBG_SINGLE_STEPPING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			if (op.dbg_value) {
				new_msr |= MSR_DE;
				new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
			} else {
				new_dbcr0 &= ~DBCR0_IC;
				if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
						current->thread.debug.dbcr1)) {
					new_msr &= ~MSR_DE;
					new_dbcr0 &= ~DBCR0_IDM;
				}
			}
#else
			if (op.dbg_value)
				new_msr |= MSR_SE;
			else
				new_msr &= ~MSR_SE;
#endif
			break;
		case SIG_DBG_BRANCH_TRACING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			return -EINVAL;
#else
			if (op.dbg_value)
				new_msr |= MSR_BE;
			else
				new_msr &= ~MSR_BE;
#endif
			break;

		default:
			return -EINVAL;
		}
	}

	/* We wait until here to actually install the values in the
	   registers so if we fail in the above loop, it will not
	   affect the contents of these registers.  After this point,
	   failure is a problem, anyway, and it's very unlikely unless
	   the user is really doing something wrong. */
	regs->msr = new_msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	current->thread.debug.dbcr0 = new_dbcr0;
#endif

	if (!access_ok(ctx, sizeof(*ctx)) ||
	    fault_in_pages_readable((u8 __user *)ctx, sizeof(*ctx)))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(ctx, regs, 1)) {
		if (show_unhandled_signals)
			printk_ratelimited(KERN_INFO "%s[%d]: bad frame in "
					   "sys_debug_setcontext: %p nip %08lx "
					   "lr %08lx\n",
					   current->comm, current->pid,
					   ctx, regs->nip, regs->link);

		force_sig(SIGSEGV);
		goto out;
	}

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
	restore_altstack(&ctx->uc_stack);

	set_thread_flag(TIF_RESTOREALL);
 out:
	return 0;
}
#endif
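
/*
 * A hypothetical userspace sketch (not part of this file) of the dbg op
 * list consumed above; the exact __NR constant name for this syscall is
 * an assumption:
 *
 *	struct sig_dbg_op op = {
 *		.dbg_type  = SIG_DBG_SINGLE_STEPPING,
 *		.dbg_value = 1,
 *	};
 *	syscall(__NR_sys_debug_setcontext, &ctx, 1, &op);
 *
 * The ops are validated and staged in locals first, so a bad list
 * returns -EINVAL without touching MSR or DBCR0.
 */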

/*
 * OK, we're invoking a handler
 */
int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
		struct task_struct *tsk)
{
	struct sigcontext __user *sc;
	struct sigframe __user *frame;
	struct mcontext __user *tm_mctx = NULL;
	unsigned long newsp = 0;
	int sigret;
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Save the thread's msr before get_tm_stackpointer() changes it */
	unsigned long msr = regs->msr;
#endif

	BUG_ON(tsk != current);

	/* Set up Signal Frame */
	frame = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*frame), 1);
	if (unlikely(frame == NULL))
		goto badframe;
	sc = (struct sigcontext __user *) &frame->sctx;

#if _NSIG != 64
#error "Please adjust handle_signal()"
#endif
	if (__put_user(to_user_ptr(ksig->ka.sa.sa_handler), &sc->handler)
	    || __put_user(oldset->sig[0], &sc->oldmask)
#ifdef CONFIG_PPC64
	    || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
#else
	    || __put_user(oldset->sig[1], &sc->_unused[3])
#endif
	    || __put_user(to_user_ptr(&frame->mctx), &sc->regs)
	    || __put_user(ksig->sig, &sc->signal))
		goto badframe;

	if (vdso32_sigtramp && tsk->mm->context.vdso_base) {
		sigret = 0;
		tramp = tsk->mm->context.vdso_base + vdso32_sigtramp;
	} else {
		sigret = __NR_sigreturn;
		tramp = (unsigned long) frame->mctx.tramp;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_mctx = &frame->mctx_transact;
	if (MSR_TM_ACTIVE(msr)) {
		if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
				      sigret, msr))
			goto badframe;
	}
	else
#endif
	{
		if (save_user_regs(regs, &frame->mctx, tm_mctx, sigret, 1))
			goto badframe;
	}

	regs->link = tramp;

	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->gpr[4] = (unsigned long) sc;
	regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
	/* enter the signal handler in big-endian mode */
	regs->msr &= ~MSR_LE;
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in handle_signal32: "
				   "%p nip %08lx lr %08lx\n",
				   tsk->comm, tsk->pid,
				   frame, regs->nip, regs->link);

	return 1;
}

/*
 * Do a signal return; undo the signal stack.
 */
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(sigreturn)
#else
SYSCALL_DEFINE0(sigreturn)
#endif
{
	struct pt_regs *regs = current_pt_regs();
	struct sigframe __user *sf;
	struct sigcontext __user *sc;
	struct sigcontext sigctx;
	struct mcontext __user *sr;
	void __user *addr;
	sigset_t set;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct mcontext __user *mcp, *tm_mcp;
	unsigned long msr_hi;
#endif

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
	sc = &sf->sctx;
	addr = sc;
	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
		goto badframe;

#ifdef CONFIG_PPC64
	/*
	 * Note that PPC32 puts the upper 32 bits of the sigmask in the
	 * unused part of the signal stackframe
	 */
	set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
#else
	set.sig[0] = sigctx.oldmask;
	set.sig[1] = sigctx._unused[3];
#endif
	set_current_blocked(&set);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	mcp = (struct mcontext __user *)&sf->mctx;
	tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
	if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
		goto badframe;
	if (MSR_TM_ACTIVE(msr_hi<<32)) {
		if (!cpu_has_feature(CPU_FTR_TM))
			goto badframe;
		if (restore_tm_user_regs(regs, mcp, tm_mcp))
			goto badframe;
	} else
#endif
	{
		sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
		addr = sr;
		if (!access_ok(sr, sizeof(*sr))
		    || restore_user_regs(regs, sr, 1))
			goto badframe;
	}

	set_thread_flag(TIF_RESTOREALL);
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in sys_sigreturn: "
				   "%p nip %08lx lr %08lx\n",
				   current->comm, current->pid,
				   addr, regs->nip, regs->link);

	force_sig(SIGSEGV);
	return 0;
}