/*
 * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
 *
 * PowerPC version
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright (C) 2001 IBM
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 *
 * Derived from "arch/i386/kernel/signal.c"
 *   Copyright (C) 1991, 1992 Linus Torvalds
 *   1997-11-28 Modified for POSIX.1b signals by Richard Henderson
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/elf.h>
#include <linux/ptrace.h>
#include <linux/ratelimit.h>
#ifdef CONFIG_PPC64
#include <linux/syscalls.h>
#include <linux/compat.h>
#else
#include <linux/wait.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/freezer.h>
#endif

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/syscalls.h>
#include <asm/sigcontext.h>
#include <asm/vdso.h>
#include <asm/switch_to.h>
#ifdef CONFIG_PPC64
#include "ppc32.h"
#include <asm/unistd.h>
#else
#include <asm/ucontext.h>
#include <asm/pgtable.h>
#endif

#include "signal.h"

#undef DEBUG_SIG

#ifdef CONFIG_PPC64
#define sys_sigsuspend		compat_sys_sigsuspend
#define sys_rt_sigsuspend	compat_sys_rt_sigsuspend
#define sys_rt_sigreturn	compat_sys_rt_sigreturn
#define sys_sigaction		compat_sys_sigaction
#define sys_swapcontext		compat_sys_swapcontext
#define sys_sigreturn		compat_sys_sigreturn

#define old_sigaction	old_sigaction32
#define sigcontext	sigcontext32
#define mcontext	mcontext32
#define ucontext	ucontext32

/*
 * Userspace code may pass a ucontext which doesn't include VSX added
 * at the end.  We need to check for this case.
 */
#define UCONTEXTSIZEWITHOUTVSX \
		(sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))
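
/*
 * A context supplied by a pre-VSX userspace is sizeof(elf_vsrreghalf_t32)
 * bytes smaller than a full one.  sys_swapcontext() below uses this
 * constant to accept either size, and only interprets VSX state when the
 * caller passed in a full-sized ucontext.
 */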

/*
 * Returning 0 means we return to userspace via
 * ret_from_except and thus restore all user
 * registers from *regs.  This is what we need
 * to do when a signal has been delivered.
 */

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
#undef __SIGNAL_FRAMESIZE
#define __SIGNAL_FRAMESIZE	__SIGNAL_FRAMESIZE32
#undef ELF_NVRREG
#define ELF_NVRREG	ELF_NVRREG32

/*
 * Functions for flipping sigsets (thanks to brain dead generic
 * implementation that makes things simple for little endian only)
 */
static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
{
        compat_sigset_t cset;

        switch (_NSIG_WORDS) {
        case 4: cset.sig[6] = set->sig[3] & 0xffffffffull;
                cset.sig[7] = set->sig[3] >> 32;
        case 3: cset.sig[4] = set->sig[2] & 0xffffffffull;
                cset.sig[5] = set->sig[2] >> 32;
        case 2: cset.sig[2] = set->sig[1] & 0xffffffffull;
                cset.sig[3] = set->sig[1] >> 32;
        case 1: cset.sig[0] = set->sig[0] & 0xffffffffull;
                cset.sig[1] = set->sig[0] >> 32;
        }
        return copy_to_user(uset, &cset, sizeof(*uset));
}

static inline int get_sigset_t(sigset_t *set,
                               const compat_sigset_t __user *uset)
{
        compat_sigset_t s32;

        if (copy_from_user(&s32, uset, sizeof(*uset)))
                return -EFAULT;

        /*
         * Swap the 2 words of the 64-bit sigset_t (they are stored
         * in the "wrong" endian in 32-bit user storage).
         */
        switch (_NSIG_WORDS) {
        case 4: set->sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
        case 3: set->sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
        case 2: set->sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
        case 1: set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
        }
        return 0;
}
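
/*
 * Worked example (illustrative): with the 64-bit kernel's _NSIG_WORDS == 1,
 * a sigset_t word of 0x0000000200000001 is stored for a 32-bit task as
 * cset.sig[0] = 0x00000001 (low word) and cset.sig[1] = 0x00000002
 * (high word); get_sigset_t() reassembles the same 64-bit value from
 * those two 32-bit halves.
 */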

static inline int get_old_sigaction(struct k_sigaction *new_ka,
                                    struct old_sigaction __user *act)
{
        compat_old_sigset_t mask;
        compat_uptr_t handler, restorer;

        if (get_user(handler, &act->sa_handler) ||
            __get_user(restorer, &act->sa_restorer) ||
            __get_user(new_ka->sa.sa_flags, &act->sa_flags) ||
            __get_user(mask, &act->sa_mask))
                return -EFAULT;
        new_ka->sa.sa_handler = compat_ptr(handler);
        new_ka->sa.sa_restorer = compat_ptr(restorer);
        siginitset(&new_ka->sa.sa_mask, mask);
        return 0;
}

#define to_user_ptr(p)		ptr_to_compat(p)
#define from_user_ptr(p)	compat_ptr(p)

static inline int save_general_regs(struct pt_regs *regs,
                                    struct mcontext __user *frame)
{
        elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
        int i;

        WARN_ON(!FULL_REGS(regs));

        for (i = 0; i <= PT_RESULT; i++) {
                /* GPRs 14-31 are not saved in a partial (syscall) frame,
                   so skip them if this is not a full register set. */
                if (i == 14 && !FULL_REGS(regs))
                        i = 32;
                if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
                        return -EFAULT;
        }
        return 0;
}

static inline int restore_general_regs(struct pt_regs *regs,
                                       struct mcontext __user *sr)
{
        elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
        int i;

        for (i = 0; i <= PT_RESULT; i++) {
                if ((i == PT_MSR) || (i == PT_SOFTE))
                        continue;
                if (__get_user(gregs[i], &sr->mc_gregs[i]))
                        return -EFAULT;
        }
        return 0;
}

#else /* CONFIG_PPC64 */

#define GP_REGS_SIZE	min(sizeof(elf_gregset_t), sizeof(struct pt_regs))

static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
{
        return copy_to_user(uset, set, sizeof(*uset));
}

static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
{
        return copy_from_user(set, uset, sizeof(*uset));
}

static inline int get_old_sigaction(struct k_sigaction *new_ka,
                                    struct old_sigaction __user *act)
{
        old_sigset_t mask;

        if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
            __get_user(new_ka->sa.sa_handler, &act->sa_handler) ||
            __get_user(new_ka->sa.sa_restorer, &act->sa_restorer))
                return -EFAULT;
        __get_user(new_ka->sa.sa_flags, &act->sa_flags);
        __get_user(mask, &act->sa_mask);
        siginitset(&new_ka->sa.sa_mask, mask);
        return 0;
}

#define to_user_ptr(p)		((unsigned long)(p))
#define from_user_ptr(p)	((void __user *)(p))

static inline int save_general_regs(struct pt_regs *regs,
                                    struct mcontext __user *frame)
{
        WARN_ON(!FULL_REGS(regs));
        return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
}

static inline int restore_general_regs(struct pt_regs *regs,
                                       struct mcontext __user *sr)
{
        /* copy up to but not including MSR */
        if (__copy_from_user(regs, &sr->mc_gregs,
                             PT_MSR * sizeof(elf_greg_t)))
                return -EFAULT;
        /* copy from orig_r3 (the word after the MSR) up to the end */
        if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
                             GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
                return -EFAULT;
        return 0;
}

#endif /* CONFIG_PPC64 */

/*
 * Atomically swap in the new signal mask, and wait for a signal.
 */
long sys_sigsuspend(old_sigset_t mask)
{
        sigset_t blocked;

        current->saved_sigmask = current->blocked;

        mask &= _BLOCKABLE;
        siginitset(&blocked, mask);
        set_current_blocked(&blocked);

        current->state = TASK_INTERRUPTIBLE;
        schedule();
        set_restore_sigmask();
        return -ERESTARTNOHAND;
}
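
/*
 * The original mask is parked in current->saved_sigmask rather than being
 * restored here: set_restore_sigmask() flags the task so that the signal
 * delivery/return path puts the saved mask back once a handler frame has
 * been set up (or once the syscall finally returns -ERESTARTNOHAND).
 */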

long sys_sigaction(int sig, struct old_sigaction __user *act,
                   struct old_sigaction __user *oact)
{
        struct k_sigaction new_ka, old_ka;
        int ret;

#ifdef CONFIG_PPC64
        if (sig < 0)
                sig = -sig;
#endif

        if (act) {
                if (get_old_sigaction(&new_ka, act))
                        return -EFAULT;
        }

        ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
        if (!ret && oact) {
                if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
                    __put_user(to_user_ptr(old_ka.sa.sa_handler),
                               &oact->sa_handler) ||
                    __put_user(to_user_ptr(old_ka.sa.sa_restorer),
                               &oact->sa_restorer) ||
                    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
                    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
                        return -EFAULT;
        }

        return ret;
}

/*
 * When we have signals to deliver, we set up on the
 * user stack, going down from the original stack pointer:
 *	an ABI gap of 56 words
 *	an mcontext struct
 *	a sigcontext struct
 *	a gap of __SIGNAL_FRAMESIZE bytes
 *
 * Each of these things must be a multiple of 16 bytes in size. The following
 * structure represents all of this except the __SIGNAL_FRAMESIZE gap.
 */
struct sigframe {
        struct sigcontext sctx;		/* the sigcontext */
        struct mcontext mctx;		/* all the register values */
        /*
         * Programs using the rs6000/xcoff abi can save up to 19 gp
         * regs and 18 fp regs below sp before decrementing it.
         */
        int abigap[56];
};
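
/*
 * Resulting user-stack picture (sketch, following the layout listed above):
 *
 *	old r1 ->	caller's frame
 *			abigap[56]	(ABI save area, 224 bytes)
 *			mctx		(register state, incl. trampoline)
 *			sctx		(struct sigcontext)
 *	new r1 ->	__SIGNAL_FRAMESIZE gap; *new r1 holds the back
 *			chain pointer to old r1 (see handle_signal32()).
 */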
312
313 /* We use the mc_pad field for the signal return trampoline. */
314 #define tramp mc_pad
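
/*
 * The trampoline that save_user_regs() writes into mc_pad is two
 * instructions: 0x38000000 | sigret is "li r0,sigret" (addi with RT=r0,
 * RA=0) and 0x44000002 is "sc", i.e. load the sigreturn/rt_sigreturn
 * syscall number and trap back into the kernel.
 */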

/*
 * When we have rt signals to deliver, we set up on the
 * user stack, going down from the original stack pointer:
 *	one rt_sigframe struct (siginfo + ucontext + ABI gap)
 *	a gap of __SIGNAL_FRAMESIZE+16 bytes
 * (the +16 is to get the siginfo and ucontext in the same
 * positions as in older kernels).
 *
 * Each of these things must be a multiple of 16 bytes in size.
 */
struct rt_sigframe {
#ifdef CONFIG_PPC64
        compat_siginfo_t info;
#else
        struct siginfo info;
#endif
        struct ucontext uc;
        /*
         * Programs using the rs6000/xcoff abi can save up to 19 gp
         * regs and 18 fp regs below sp before decrementing it.
         */
        int abigap[56];
};
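
/*
 * Once handle_rt_signal32() has built this frame, the handler is entered
 * with r3 = signal number, r4 = &rt_sf->info and r5 = &rt_sf->uc,
 * matching the three-argument SA_SIGINFO handler prototype, while the
 * new r1 sits __SIGNAL_FRAMESIZE+16 bytes below the frame.
 */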

#ifdef CONFIG_VSX
unsigned long copy_fpr_to_user(void __user *to,
                               struct task_struct *task)
{
        double buf[ELF_NFPREG];
        int i;

        /* copy the FPRs from the thread_struct to a local buffer,
           then write that buffer out to userspace */
        for (i = 0; i < (ELF_NFPREG - 1) ; i++)
                buf[i] = task->thread.TS_FPR(i);
        memcpy(&buf[i], &task->thread.fpscr, sizeof(double));
        return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
}

unsigned long copy_fpr_from_user(struct task_struct *task,
                                 void __user *from)
{
        double buf[ELF_NFPREG];
        int i;

        if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
                return 1;
        for (i = 0; i < (ELF_NFPREG - 1) ; i++)
                task->thread.TS_FPR(i) = buf[i];
        memcpy(&task->thread.fpscr, &buf[i], sizeof(double));

        return 0;
}

unsigned long copy_vsx_to_user(void __user *to,
                               struct task_struct *task)
{
        double buf[ELF_NVSRHALFREG];
        int i;

        /* copy the VSX state from the thread_struct to a local buffer,
           then write that buffer out to userspace */
        for (i = 0; i < ELF_NVSRHALFREG; i++)
                buf[i] = task->thread.fpr[i][TS_VSRLOWOFFSET];
        return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
}

unsigned long copy_vsx_from_user(struct task_struct *task,
                                 void __user *from)
{
        double buf[ELF_NVSRHALFREG];
        int i;

        if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
                return 1;
        for (i = 0; i < ELF_NVSRHALFREG ; i++)
                task->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
        return 0;
}
#else
inline unsigned long copy_fpr_to_user(void __user *to,
                                      struct task_struct *task)
{
        return __copy_to_user(to, task->thread.fpr,
                              ELF_NFPREG * sizeof(double));
}

inline unsigned long copy_fpr_from_user(struct task_struct *task,
                                        void __user *from)
{
        return __copy_from_user(task->thread.fpr, from,
                                ELF_NFPREG * sizeof(double));
}
#endif
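
/*
 * Layout of the buffer handled above: ELF_NFPREG doubles, i.e. FPR0-FPR31
 * followed by FPSCR in the final slot (ELF_NFPREG counts the 32 FPRs plus
 * FPSCR); the VSX variants carry ELF_NVSRHALFREG doubles, one per VSR.
 */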

/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 */
static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
                          int sigret, int ctx_has_vsx_region)
{
        unsigned long msr = regs->msr;

        /* Make sure floating point registers are stored in regs */
        flush_fp_to_thread(current);

        /* save general registers */
        if (save_general_regs(regs, frame))
                return 1;

#ifdef CONFIG_ALTIVEC
        /* save altivec registers */
        if (current->thread.used_vr) {
                flush_altivec_to_thread(current);
                if (__copy_to_user(&frame->mc_vregs, current->thread.vr,
                                   ELF_NVRREG * sizeof(vector128)))
                        return 1;
                /* set MSR_VEC in the saved MSR value to indicate that
                   frame->mc_vregs contains valid data */
                msr |= MSR_VEC;
        }
        /* else assert((regs->msr & MSR_VEC) == 0) */

        /* We always copy to/from vrsave, it's 0 if we don't have or don't
         * use altivec. Since VSCR only contains 32 bits saved in the least
         * significant bits of a vector, we "cheat" and stuff VRSAVE in the
         * most significant bits of that same vector. --BenH
         */
        if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
                return 1;
#endif /* CONFIG_ALTIVEC */
        if (copy_fpr_to_user(&frame->mc_fregs, current))
                return 1;
#ifdef CONFIG_VSX
        /*
         * Copy VSR 0-31 upper half from thread_struct to local
         * buffer, then write that to userspace.  Also set MSR_VSX in
         * the saved MSR value to indicate that frame->mc_vregs
         * contains valid data
         */
        if (current->thread.used_vsr && ctx_has_vsx_region) {
                __giveup_vsx(current);
                if (copy_vsx_to_user(&frame->mc_vsregs, current))
                        return 1;
                msr |= MSR_VSX;
        }
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
        /* save spe registers */
        if (current->thread.used_spe) {
                flush_spe_to_thread(current);
                if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
                                   ELF_NEVRREG * sizeof(u32)))
                        return 1;
                /* set MSR_SPE in the saved MSR value to indicate that
                   frame->mc_vregs contains valid data */
                msr |= MSR_SPE;
        }
        /* else assert((regs->msr & MSR_SPE) == 0) */

        /* We always copy to/from spefscr */
        if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
                return 1;
#endif /* CONFIG_SPE */

        if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
                return 1;
        if (sigret) {
                /* Set up the sigreturn trampoline: li r0,sigret; sc */
                if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
                    || __put_user(0x44000002UL, &frame->tramp[1]))
                        return 1;
                flush_icache_range((unsigned long) &frame->tramp[0],
                                   (unsigned long) &frame->tramp[2]);
        }

        return 0;
}

/*
 * Restore the current user register values from the user stack,
 * (except for MSR).
 */
static long restore_user_regs(struct pt_regs *regs,
                              struct mcontext __user *sr, int sig)
{
        long err;
        unsigned int save_r2 = 0;
        unsigned long msr;
#ifdef CONFIG_VSX
        int i;
#endif

        /*
         * restore general registers but not including MSR or SOFTE. Also
         * take care of keeping r2 (TLS) intact if not a signal
         */
        if (!sig)
                save_r2 = (unsigned int)regs->gpr[2];
        err = restore_general_regs(regs, sr);
        regs->trap = 0;
        err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
        if (!sig)
                regs->gpr[2] = (unsigned long) save_r2;
        if (err)
                return 1;

        /* if doing signal return, restore the previous little-endian mode */
        if (sig)
                regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

        /*
         * Do this before updating the thread state in
         * current->thread.fpr/vr/evr.  That way, if we get preempted
         * and another task grabs the FPU/Altivec/SPE, it won't be
         * tempted to save the current CPU state into the thread_struct
         * and corrupt what we are writing there.
         */
        discard_lazy_cpu_state();

#ifdef CONFIG_ALTIVEC
        /*
         * Force the process to reload the altivec registers from
         * current->thread when it next does altivec instructions
         */
        regs->msr &= ~MSR_VEC;
        if (msr & MSR_VEC) {
                /* restore altivec registers from the stack */
                if (__copy_from_user(current->thread.vr, &sr->mc_vregs,
                                     sizeof(sr->mc_vregs)))
                        return 1;
        } else if (current->thread.used_vr)
                memset(current->thread.vr, 0, ELF_NVRREG * sizeof(vector128));

        /* Always get VRSAVE back */
        if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
                return 1;
#endif /* CONFIG_ALTIVEC */
        if (copy_fpr_from_user(current, &sr->mc_fregs))
                return 1;

#ifdef CONFIG_VSX
        /*
         * Force the process to reload the VSX registers from
         * current->thread when it next does VSX instructions.
         */
        regs->msr &= ~MSR_VSX;
        if (msr & MSR_VSX) {
                /*
                 * Restore the VSX state from the stack to a local
                 * buffer, then write this out to the thread_struct
                 */
                if (copy_vsx_from_user(current, &sr->mc_vsregs))
                        return 1;
        } else if (current->thread.used_vsr)
                for (i = 0; i < 32 ; i++)
                        current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
#endif /* CONFIG_VSX */
        /*
         * force the process to reload the FP registers from
         * current->thread when it next does FP instructions
         */
        regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);

#ifdef CONFIG_SPE
        /* force the process to reload the spe registers from
           current->thread when it next does spe instructions */
        regs->msr &= ~MSR_SPE;
        if (msr & MSR_SPE) {
                /* restore spe registers from the stack */
                if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
                                     ELF_NEVRREG * sizeof(u32)))
                        return 1;
        } else if (current->thread.used_spe)
                memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));

        /* Always get SPEFSCR back */
        if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
                return 1;
#endif /* CONFIG_SPE */

        return 0;
}

#ifdef CONFIG_PPC64
long compat_sys_rt_sigaction(int sig, const struct sigaction32 __user *act,
                             struct sigaction32 __user *oact, size_t sigsetsize)
{
        struct k_sigaction new_ka, old_ka;
        int ret;

        /* XXX: Don't preclude handling different sized sigset_t's.  */
        if (sigsetsize != sizeof(compat_sigset_t))
                return -EINVAL;

        if (act) {
                compat_uptr_t handler;

                ret = get_user(handler, &act->sa_handler);
                new_ka.sa.sa_handler = compat_ptr(handler);
                ret |= get_sigset_t(&new_ka.sa.sa_mask, &act->sa_mask);
                ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
                if (ret)
                        return -EFAULT;
        }

        ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
        if (!ret && oact) {
                ret = put_user(to_user_ptr(old_ka.sa.sa_handler), &oact->sa_handler);
                ret |= put_sigset_t(&oact->sa_mask, &old_ka.sa.sa_mask);
                ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
        }
        return ret;
}

/*
 * Note: it is necessary to treat how as an unsigned int, with the
 * corresponding cast to a signed int to ensure that the proper
 * conversion (sign extension) between the register representation
 * of a signed int (how in 32-bit mode) and the register representation
 * of a signed int (how in 64-bit mode) is performed.
 */
long compat_sys_rt_sigprocmask(u32 how, compat_sigset_t __user *set,
                               compat_sigset_t __user *oset, size_t sigsetsize)
{
        sigset_t s;
        sigset_t __user *up;
        int ret;
        mm_segment_t old_fs = get_fs();

        if (set) {
                if (get_sigset_t(&s, set))
                        return -EFAULT;
        }

        set_fs(KERNEL_DS);
        /* This is valid because of the set_fs() */
        up = (sigset_t __user *) &s;
        ret = sys_rt_sigprocmask((int)how, set ? up : NULL, oset ? up : NULL,
                                 sigsetsize);
        set_fs(old_fs);
        if (ret)
                return ret;
        if (oset) {
                if (put_sigset_t(oset, &s))
                        return -EFAULT;
        }
        return 0;
}
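
/*
 * Pattern shared by several wrappers here: the 64-bit sigset/siginfo
 * temporary lives on the kernel stack, so the code switches to KERNEL_DS
 * for the duration of the inner syscall so that casting a kernel pointer
 * to a __user pointer passes the generic code's access checks, and the
 * original fs is always restored afterwards.
 */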

long compat_sys_rt_sigpending(compat_sigset_t __user *set, compat_size_t sigsetsize)
{
        sigset_t s;
        int ret;
        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);
        /* The __user pointer cast is valid because of the set_fs() */
        ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize);
        set_fs(old_fs);
        if (!ret) {
                if (put_sigset_t(set, &s))
                        return -EFAULT;
        }
        return ret;
}


int copy_siginfo_to_user32(struct compat_siginfo __user *d, siginfo_t *s)
{
        int err;

        if (!access_ok(VERIFY_WRITE, d, sizeof(*d)))
                return -EFAULT;

        /* If you change siginfo_t structure, please be sure
         * this code is fixed accordingly.
         * It should never copy any pad contained in the structure
         * to avoid security leaks, but must copy the generic
         * 3 ints plus the relevant union member.
         * This routine must convert siginfo from 64bit to 32bit as well
         * at the same time.
         */
        err = __put_user(s->si_signo, &d->si_signo);
        err |= __put_user(s->si_errno, &d->si_errno);
        err |= __put_user((short)s->si_code, &d->si_code);
        if (s->si_code < 0)
                err |= __copy_to_user(&d->_sifields._pad, &s->_sifields._pad,
                                      SI_PAD_SIZE32);
        else switch(s->si_code >> 16) {
        case __SI_CHLD >> 16:
                err |= __put_user(s->si_pid, &d->si_pid);
                err |= __put_user(s->si_uid, &d->si_uid);
                err |= __put_user(s->si_utime, &d->si_utime);
                err |= __put_user(s->si_stime, &d->si_stime);
                err |= __put_user(s->si_status, &d->si_status);
                break;
        case __SI_FAULT >> 16:
                err |= __put_user((unsigned int)(unsigned long)s->si_addr,
                                  &d->si_addr);
                break;
        case __SI_POLL >> 16:
                err |= __put_user(s->si_band, &d->si_band);
                err |= __put_user(s->si_fd, &d->si_fd);
                break;
        case __SI_TIMER >> 16:
                err |= __put_user(s->si_tid, &d->si_tid);
                err |= __put_user(s->si_overrun, &d->si_overrun);
                err |= __put_user(s->si_int, &d->si_int);
                break;
        case __SI_RT >> 16: /* This is not generated by the kernel as of now.  */
        case __SI_MESGQ >> 16:
                err |= __put_user(s->si_int, &d->si_int);
                /* fallthrough */
        case __SI_KILL >> 16:
        default:
                err |= __put_user(s->si_pid, &d->si_pid);
                err |= __put_user(s->si_uid, &d->si_uid);
                break;
        }
        return err;
}

#define copy_siginfo_to_user	copy_siginfo_to_user32

int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
{
        memset(to, 0, sizeof *to);

        if (copy_from_user(to, from, 3*sizeof(int)) ||
            copy_from_user(to->_sifields._pad,
                           from->_sifields._pad, SI_PAD_SIZE32))
                return -EFAULT;

        return 0;
}

/*
 * Note: it is necessary to treat pid and sig as unsigned ints, with the
 * corresponding casts to signed ints to ensure that the proper conversion
 * (sign extension) between the register representation of a signed int
 * (pid/sig in 32-bit mode) and the register representation of a signed int
 * (pid/sig in 64-bit mode) is performed.
 */
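
/*
 * Example of why the casts below matter: a negative 32-bit pid or sig
 * arrives here zero-extended (e.g. -1 shows up as 0x00000000ffffffff in
 * the 64-bit register image), and the explicit (int) casts sign-extend
 * the values back before they reach sys_rt_sigqueueinfo().
 */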
long compat_sys_rt_sigqueueinfo(u32 pid, u32 sig, compat_siginfo_t __user *uinfo)
{
        siginfo_t info;
        int ret;
        mm_segment_t old_fs = get_fs();

        ret = copy_siginfo_from_user32(&info, uinfo);
        if (unlikely(ret))
                return ret;

        set_fs(KERNEL_DS);
        /* The __user pointer cast is valid because of the set_fs() */
        ret = sys_rt_sigqueueinfo((int)pid, (int)sig, (siginfo_t __user *) &info);
        set_fs(old_fs);
        return ret;
}

/*
 * Start Alternate signal stack support
 *
 * System Calls
 *	sigaltstack		compat_sys_sigaltstack
 */

int compat_sys_sigaltstack(u32 __new, u32 __old, int r5,
                           int r6, int r7, int r8, struct pt_regs *regs)
{
        stack_32_t __user *newstack = compat_ptr(__new);
        stack_32_t __user *oldstack = compat_ptr(__old);
        stack_t uss, uoss;
        int ret;
        mm_segment_t old_fs;
        unsigned long sp;
        compat_uptr_t ss_sp;

        /*
         * set sp to the user stack on entry to the system call
         * the system call router sets R9 to the saved registers
         */
        sp = regs->gpr[1];

        /* Put new stack info in local 64 bit stack struct */
        if (newstack) {
                if (get_user(ss_sp, &newstack->ss_sp) ||
                    __get_user(uss.ss_flags, &newstack->ss_flags) ||
                    __get_user(uss.ss_size, &newstack->ss_size))
                        return -EFAULT;
                uss.ss_sp = compat_ptr(ss_sp);
        }

        old_fs = get_fs();
        set_fs(KERNEL_DS);
        /* The __user pointer casts are valid because of the set_fs() */
        ret = do_sigaltstack(
                newstack ? (stack_t __user *) &uss : NULL,
                oldstack ? (stack_t __user *) &uoss : NULL,
                sp);
        set_fs(old_fs);
        /* Copy the stack information to the user output buffer */
        if (!ret && oldstack &&
            (put_user(ptr_to_compat(uoss.ss_sp), &oldstack->ss_sp) ||
             __put_user(uoss.ss_flags, &oldstack->ss_flags) ||
             __put_user(uoss.ss_size, &oldstack->ss_size)))
                return -EFAULT;
        return ret;
}
#endif /* CONFIG_PPC64 */

/*
 * Set up a signal frame for a "real-time" signal handler
 * (one which gets siginfo).
 */
int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
                       siginfo_t *info, sigset_t *oldset,
                       struct pt_regs *regs)
{
        struct rt_sigframe __user *rt_sf;
        struct mcontext __user *frame;
        void __user *addr;
        unsigned long newsp = 0;

        /* Set up Signal Frame */
        /* Put a Real Time Context onto stack */
        rt_sf = get_sigframe(ka, regs, sizeof(*rt_sf), 1);
        addr = rt_sf;
        if (unlikely(rt_sf == NULL))
                goto badframe;

        /* Put the siginfo & fill in most of the ucontext */
        if (copy_siginfo_to_user(&rt_sf->info, info)
            || __put_user(0, &rt_sf->uc.uc_flags)
            || __put_user(0, &rt_sf->uc.uc_link)
            || __put_user(current->sas_ss_sp, &rt_sf->uc.uc_stack.ss_sp)
            || __put_user(sas_ss_flags(regs->gpr[1]),
                          &rt_sf->uc.uc_stack.ss_flags)
            || __put_user(current->sas_ss_size, &rt_sf->uc.uc_stack.ss_size)
            || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
                          &rt_sf->uc.uc_regs)
            || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
                goto badframe;

        /* Save user registers on the stack */
        frame = &rt_sf->uc.uc_mcontext;
        addr = frame;
        if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
                if (save_user_regs(regs, frame, 0, 1))
                        goto badframe;
                regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
        } else {
                if (save_user_regs(regs, frame, __NR_rt_sigreturn, 1))
                        goto badframe;
                regs->link = (unsigned long) frame->tramp;
        }

        current->thread.fpscr.val = 0;	/* turn off all fp exceptions */

        /* create a stack frame for the caller of the handler */
        newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
        addr = (void __user *)regs->gpr[1];
        if (put_user(regs->gpr[1], (u32 __user *)newsp))
                goto badframe;

        /* Fill registers for signal handler */
        regs->gpr[1] = newsp;
        regs->gpr[3] = sig;
        regs->gpr[4] = (unsigned long) &rt_sf->info;
        regs->gpr[5] = (unsigned long) &rt_sf->uc;
        regs->gpr[6] = (unsigned long) rt_sf;
        regs->nip = (unsigned long) ka->sa.sa_handler;
        /* enter the signal handler in big-endian mode */
        regs->msr &= ~MSR_LE;
        return 1;

badframe:
#ifdef DEBUG_SIG
        printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n",
               regs, frame, newsp);
#endif
        if (show_unhandled_signals)
                printk_ratelimited(KERN_INFO
                                   "%s[%d]: bad frame in handle_rt_signal32: "
                                   "%p nip %08lx lr %08lx\n",
                                   current->comm, current->pid,
                                   addr, regs->nip, regs->link);

        force_sigsegv(sig, current);
        return 0;
}

static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
{
        sigset_t set;
        struct mcontext __user *mcp;

        if (get_sigset_t(&set, &ucp->uc_sigmask))
                return -EFAULT;
#ifdef CONFIG_PPC64
        {
                u32 cmcp;

                if (__get_user(cmcp, &ucp->uc_regs))
                        return -EFAULT;
                mcp = (struct mcontext __user *)(u64)cmcp;
                /* no need to check access_ok(mcp), since mcp < 4GB */
        }
#else
        if (__get_user(mcp, &ucp->uc_regs))
                return -EFAULT;
        if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp)))
                return -EFAULT;
#endif
        restore_sigmask(&set);
        if (restore_user_regs(regs, mcp, sig))
                return -EFAULT;

        return 0;
}

long sys_swapcontext(struct ucontext __user *old_ctx,
                     struct ucontext __user *new_ctx,
                     int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
{
        unsigned char tmp;
        int ctx_has_vsx_region = 0;

#ifdef CONFIG_PPC64
        unsigned long new_msr = 0;

        if (new_ctx) {
                struct mcontext __user *mcp;
                u32 cmcp;

                /*
                 * Get pointer to the real mcontext.  No need for
                 * access_ok since we are dealing with compat
                 * pointers.
                 */
                if (__get_user(cmcp, &new_ctx->uc_regs))
                        return -EFAULT;
                mcp = (struct mcontext __user *)(u64)cmcp;
                if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
                        return -EFAULT;
        }
        /*
         * Check that the context is not smaller than the original
         * size (with VMX but without VSX)
         */
        if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
                return -EINVAL;
        /*
         * Reject the new context if it claims VSX state (MSR_VSX set)
         * but is not big enough to actually contain it.
         */
        if ((ctx_size < sizeof(struct ucontext)) &&
            (new_msr & MSR_VSX))
                return -EINVAL;
        /* Does the context have enough room to store VSX data? */
        if (ctx_size >= sizeof(struct ucontext))
                ctx_has_vsx_region = 1;
#else
        /* Context size is for future use. Right now, we only make sure
         * we are passed something we understand
         */
        if (ctx_size < sizeof(struct ucontext))
                return -EINVAL;
#endif
        if (old_ctx != NULL) {
                struct mcontext __user *mctx;

                /*
                 * old_ctx might not be 16-byte aligned, in which
                 * case old_ctx->uc_mcontext won't be either.
                 * Because we have the old_ctx->uc_pad2 field
                 * before old_ctx->uc_mcontext, we need to round down
                 * from &old_ctx->uc_mcontext to a 16-byte boundary.
                 */
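                /*
                 * For example, an &old_ctx->uc_mcontext ending in 0x...c8
                 * gets rounded down to 0x...c0 here; the bytes between the
                 * rounded-down address and uc_mcontext lie within uc_pad2,
                 * so only padding is overlapped.
                 */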
                mctx = (struct mcontext __user *)
                        ((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
                if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size)
                    || save_user_regs(regs, mctx, 0, ctx_has_vsx_region)
                    || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
                    || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
                        return -EFAULT;
        }
        if (new_ctx == NULL)
                return 0;
        if (!access_ok(VERIFY_READ, new_ctx, ctx_size)
            || __get_user(tmp, (u8 __user *) new_ctx)
            || __get_user(tmp, (u8 __user *) new_ctx + ctx_size - 1))
                return -EFAULT;

        /*
         * If we get a fault copying the context into the kernel's
         * image of the user's registers, we can't just return -EFAULT
         * because the user's registers will be corrupted.  For instance
         * the NIP value may have been updated but not some of the
         * other registers.  Given that we have done the access_ok
         * and successfully read the first and last bytes of the region
         * above, this should only happen in an out-of-memory situation
         * or if another thread unmaps the region containing the context.
         * We kill the task with a SIGSEGV in this situation.
         */
        if (do_setcontext(new_ctx, regs, 0))
                do_exit(SIGSEGV);

        set_thread_flag(TIF_RESTOREALL);
        return 0;
}

long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
                      struct pt_regs *regs)
{
        struct rt_sigframe __user *rt_sf;

        /* Always make any pending restarted system calls return -EINTR */
        current_thread_info()->restart_block.fn = do_no_restart_syscall;

        rt_sf = (struct rt_sigframe __user *)
                (regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
        if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
                goto bad;
        if (do_setcontext(&rt_sf->uc, regs, 1))
                goto bad;

        /*
         * It's not clear whether or why it is desirable to save the
         * sigaltstack setting on signal delivery and restore it on
         * signal return.  But other architectures do this and we have
         * always done it up until now so it is probably better not to
         * change it.  -- paulus
         */
#ifdef CONFIG_PPC64
        /*
         * We use the compat_sys_ version that does the 32/64 bits conversion
         * and takes userland pointers directly.  What about error checking?
         * Nobody does any...
         */
        compat_sys_sigaltstack((u32)(u64)&rt_sf->uc.uc_stack, 0, 0, 0, 0, 0, regs);
#else
        do_sigaltstack(&rt_sf->uc.uc_stack, NULL, regs->gpr[1]);
#endif
        set_thread_flag(TIF_RESTOREALL);
        return 0;

 bad:
        if (show_unhandled_signals)
                printk_ratelimited(KERN_INFO
                                   "%s[%d]: bad frame in sys_rt_sigreturn: "
                                   "%p nip %08lx lr %08lx\n",
                                   current->comm, current->pid,
                                   rt_sf, regs->nip, regs->link);

        force_sig(SIGSEGV, current);
        return 0;
}

#ifdef CONFIG_PPC32
int sys_debug_setcontext(struct ucontext __user *ctx,
                         int ndbg, struct sig_dbg_op __user *dbg,
                         int r6, int r7, int r8,
                         struct pt_regs *regs)
{
        struct sig_dbg_op op;
        int i;
        unsigned char tmp;
        unsigned long new_msr = regs->msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
        unsigned long new_dbcr0 = current->thread.dbcr0;
#endif

        for (i = 0; i < ndbg; i++) {
                if (copy_from_user(&op, dbg + i, sizeof(op)))
                        return -EFAULT;
                switch (op.dbg_type) {
                case SIG_DBG_SINGLE_STEPPING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
                        if (op.dbg_value) {
                                new_msr |= MSR_DE;
                                new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
                        } else {
                                new_dbcr0 &= ~DBCR0_IC;
                                if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
                                                current->thread.dbcr1)) {
                                        new_msr &= ~MSR_DE;
                                        new_dbcr0 &= ~DBCR0_IDM;
                                }
                        }
#else
                        if (op.dbg_value)
                                new_msr |= MSR_SE;
                        else
                                new_msr &= ~MSR_SE;
#endif
                        break;
                case SIG_DBG_BRANCH_TRACING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
                        return -EINVAL;
#else
                        if (op.dbg_value)
                                new_msr |= MSR_BE;
                        else
                                new_msr &= ~MSR_BE;
#endif
                        break;

                default:
                        return -EINVAL;
                }
        }

        /* We wait until here to actually install the values in the
           registers so if we fail in the above loop, it will not
           affect the contents of these registers.  After this point,
           failure is a problem, anyway, and it's very unlikely unless
           the user is really doing something wrong. */
        regs->msr = new_msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
        current->thread.dbcr0 = new_dbcr0;
#endif

        if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx))
            || __get_user(tmp, (u8 __user *) ctx)
            || __get_user(tmp, (u8 __user *) (ctx + 1) - 1))
                return -EFAULT;

        /*
         * If we get a fault copying the context into the kernel's
         * image of the user's registers, we can't just return -EFAULT
         * because the user's registers will be corrupted.  For instance
         * the NIP value may have been updated but not some of the
         * other registers.  Given that we have done the access_ok
         * and successfully read the first and last bytes of the region
         * above, this should only happen in an out-of-memory situation
         * or if another thread unmaps the region containing the context.
         * We kill the task with a SIGSEGV in this situation.
         */
        if (do_setcontext(ctx, regs, 1)) {
                if (show_unhandled_signals)
                        printk_ratelimited(KERN_INFO "%s[%d]: bad frame in "
                                           "sys_debug_setcontext: %p nip %08lx "
                                           "lr %08lx\n",
                                           current->comm, current->pid,
                                           ctx, regs->nip, regs->link);

                force_sig(SIGSEGV, current);
                goto out;
        }

        /*
         * It's not clear whether or why it is desirable to save the
         * sigaltstack setting on signal delivery and restore it on
         * signal return.  But other architectures do this and we have
         * always done it up until now so it is probably better not to
         * change it.  -- paulus
         */
        do_sigaltstack(&ctx->uc_stack, NULL, regs->gpr[1]);

        set_thread_flag(TIF_RESTOREALL);
 out:
        return 0;
}
#endif

/*
 * OK, we're invoking a handler
 */
int handle_signal32(unsigned long sig, struct k_sigaction *ka,
                    siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
{
        struct sigcontext __user *sc;
        struct sigframe __user *frame;
        unsigned long newsp = 0;

        /* Set up Signal Frame */
        frame = get_sigframe(ka, regs, sizeof(*frame), 1);
        if (unlikely(frame == NULL))
                goto badframe;
        sc = (struct sigcontext __user *) &frame->sctx;

#if _NSIG != 64
#error "Please adjust handle_signal()"
#endif
        if (__put_user(to_user_ptr(ka->sa.sa_handler), &sc->handler)
            || __put_user(oldset->sig[0], &sc->oldmask)
#ifdef CONFIG_PPC64
            || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
#else
            || __put_user(oldset->sig[1], &sc->_unused[3])
#endif
            || __put_user(to_user_ptr(&frame->mctx), &sc->regs)
            || __put_user(sig, &sc->signal))
                goto badframe;

        if (vdso32_sigtramp && current->mm->context.vdso_base) {
                if (save_user_regs(regs, &frame->mctx, 0, 1))
                        goto badframe;
                regs->link = current->mm->context.vdso_base + vdso32_sigtramp;
        } else {
                if (save_user_regs(regs, &frame->mctx, __NR_sigreturn, 1))
                        goto badframe;
                regs->link = (unsigned long) frame->mctx.tramp;
        }

        current->thread.fpscr.val = 0;	/* turn off all fp exceptions */

        /* create a stack frame for the caller of the handler */
        newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
        if (put_user(regs->gpr[1], (u32 __user *)newsp))
                goto badframe;

        regs->gpr[1] = newsp;
        regs->gpr[3] = sig;
        regs->gpr[4] = (unsigned long) sc;
        regs->nip = (unsigned long) ka->sa.sa_handler;
        /* enter the signal handler in big-endian mode */
        regs->msr &= ~MSR_LE;

        return 1;

badframe:
#ifdef DEBUG_SIG
        printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n",
               regs, frame, newsp);
#endif
        if (show_unhandled_signals)
                printk_ratelimited(KERN_INFO
                                   "%s[%d]: bad frame in handle_signal32: "
                                   "%p nip %08lx lr %08lx\n",
                                   current->comm, current->pid,
                                   frame, regs->nip, regs->link);

        force_sigsegv(sig, current);
        return 0;
}

/*
 * Do a signal return; undo the signal stack.
 */
long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
                   struct pt_regs *regs)
{
        struct sigcontext __user *sc;
        struct sigcontext sigctx;
        struct mcontext __user *sr;
        void __user *addr;
        sigset_t set;

        /* Always make any pending restarted system calls return -EINTR */
        current_thread_info()->restart_block.fn = do_no_restart_syscall;

        sc = (struct sigcontext __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
        addr = sc;
        if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
                goto badframe;

#ifdef CONFIG_PPC64
        /*
         * Note that PPC32 puts the upper 32 bits of the sigmask in the
         * unused part of the signal stackframe
         */
        set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
#else
        set.sig[0] = sigctx.oldmask;
        set.sig[1] = sigctx._unused[3];
#endif
        restore_sigmask(&set);

        sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
        addr = sr;
        if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
            || restore_user_regs(regs, sr, 1))
                goto badframe;

        set_thread_flag(TIF_RESTOREALL);
        return 0;

badframe:
        if (show_unhandled_signals)
                printk_ratelimited(KERN_INFO
                                   "%s[%d]: bad frame in sys_sigreturn: "
                                   "%p nip %08lx lr %08lx\n",
                                   current->comm, current->pid,
                                   addr, regs->nip, regs->link);

        force_sig(SIGSEGV, current);
        return 0;
}