/*
 * linux/arch/m68knommu/kernel/signal.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

/*
 * Linux/m68k support by Hamish Macdonald
 *
 * 68060 fixes by Jesper Skov
 *
 * 1997-12-01 Modified for POSIX.1b signals by Andreas Schwab
 *
 * mathemu support by Roman Zippel
 * (Note: fpstate in the signal context is completely ignored for the emulator
 * and the internal floating point format is put on stack)
 */

/*
 * ++roman (07/09/96): implemented signal stacks (specially for tosemu on
 * Atari :-) Current limitation: Only one sigstack can be active at one time.
 * If a second signal with SA_ONSTACK set arrives while working on a sigstack,
 * SA_ONSTACK is ignored. This behaviour avoids lots of trouble with nested
 * signal handlers!
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/syscalls.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/highuid.h>
#include <linux/tty.h>
#include <linux/personality.h>
#include <linux/binfmts.h>

#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/traps.h>
#include <asm/ucontext.h>

#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))

void ret_from_user_signal(void);
void ret_from_user_rt_signal(void);

/*
 * Atomically swap in the new signal mask, and wait for a signal.
 */
asmlinkage int
sys_sigsuspend(int unused0, int unused1, old_sigset_t mask)
{
        mask &= _BLOCKABLE;
        spin_lock_irq(&current->sighand->siglock);
        current->saved_sigmask = current->blocked;
        siginitset(&current->blocked, mask);
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);

        current->state = TASK_INTERRUPTIBLE;
        schedule();
        set_restore_sigmask();

        return -ERESTARTNOHAND;
}

asmlinkage int
sys_sigaction(int sig, const struct old_sigaction __user *act,
              struct old_sigaction __user *oact)
{
        struct k_sigaction new_ka, old_ka;
        int ret;

        if (act) {
                old_sigset_t mask;
                if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
                    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
                    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
                    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
                    __get_user(mask, &act->sa_mask))
                        return -EFAULT;
                siginitset(&new_ka.sa.sa_mask, mask);
        }

        ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

        if (!ret && oact) {
                if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
                    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
                    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
                    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
                    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
                        return -EFAULT;
        }

        return ret;
}

asmlinkage int
sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
{
        return do_sigaltstack(uss, uoss, rdusp());
}


/*
 * Do a signal return; undo the signal stack.
 *
 * Keep the return code on the stack quadword aligned!
 * That makes the cache flush below easier.
 */

struct sigframe
{
        char __user *pretcode;
        int sig;
        int code;
        struct sigcontext __user *psc;
        char retcode[8];
        unsigned long extramask[_NSIG_WORDS-1];
        struct sigcontext sc;
};

struct rt_sigframe
{
        char __user *pretcode;
        int sig;
        struct siginfo __user *pinfo;
        void __user *puc;
        char retcode[8];
        struct siginfo info;
        struct ucontext uc;
};

#ifdef CONFIG_FPU

static unsigned char fpu_version = 0;   /* version number of fpu, set by setup_frame */

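/*
 * Reload the hardware FPU (or the math emulator's software state) from the
 * floating point area saved in a plain sigcontext frame.
 */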
static inline int restore_fpu_state(struct sigcontext *sc)
{
        int err = 1;

        if (FPU_IS_EMU) {
                /* restore registers */
                memcpy(current->thread.fpcntl, sc->sc_fpcntl, 12);
                memcpy(current->thread.fp, sc->sc_fpregs, 24);
                return 0;
        }

        if (sc->sc_fpstate[0]) {
                /* Verify the frame format. */
                if (sc->sc_fpstate[0] != fpu_version)
                        goto out;

                __asm__ volatile (".chip 68k/68881\n\t"
                                  "fmovemx %0,%%fp0-%%fp1\n\t"
                                  "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
                                  ".chip 68k"
                                  : /* no outputs */
                                  : "m" (*sc->sc_fpregs), "m" (*sc->sc_fpcntl));
        }
        __asm__ volatile (".chip 68k/68881\n\t"
                          "frestore %0\n\t"
                          ".chip 68k" : : "m" (*sc->sc_fpstate));
        err = 0;

out:
        return err;
}

#define FPCONTEXT_SIZE  216
#define uc_fpstate      uc_filler[0]
#define uc_formatvec    uc_filler[FPCONTEXT_SIZE/4]
#define uc_extra        uc_filler[FPCONTEXT_SIZE/4+1]

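/*
 * Reload the FPU state described by an rt frame's ucontext: the fsave frame
 * lives in uc_fpstate and the data/control registers in uc_mcontext.fpregs.
 */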
static inline int rt_restore_fpu_state(struct ucontext __user *uc)
{
        unsigned char fpstate[FPCONTEXT_SIZE];
        int context_size = 0;
        fpregset_t fpregs;
        int err = 1;

        if (FPU_IS_EMU) {
                /* restore fpu control register */
                if (__copy_from_user(current->thread.fpcntl,
                                     uc->uc_mcontext.fpregs.f_fpcntl, 12))
                        goto out;
                /* restore all other fpu registers */
                if (__copy_from_user(current->thread.fp,
                                     uc->uc_mcontext.fpregs.f_fpregs, 96))
                        goto out;
                return 0;
        }

        if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate))
                goto out;
        if (fpstate[0]) {
                context_size = fpstate[1];

                /* Verify the frame format. */
                if (fpstate[0] != fpu_version)
                        goto out;
                if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs,
                                     sizeof(fpregs)))
                        goto out;
                __asm__ volatile (".chip 68k/68881\n\t"
                                  "fmovemx %0,%%fp0-%%fp7\n\t"
                                  "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
                                  ".chip 68k"
                                  : /* no outputs */
                                  : "m" (*fpregs.f_fpregs),
                                    "m" (*fpregs.f_fpcntl));
        }
        if (context_size &&
            __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1,
                             context_size))
                goto out;
        __asm__ volatile (".chip 68k/68881\n\t"
                          "frestore %0\n\t"
                          ".chip 68k" : : "m" (*fpstate));
        err = 0;

out:
        return err;
}

#endif

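/*
 * Restore user registers, the user stack pointer and the exception
 * format/vector word from a sigcontext; the saved d0 is handed back
 * through *pd0 so the sigreturn path can return it.  Returns non-zero
 * if the frame could not be read from user space.
 */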
static inline int
restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __user *fp,
                   int *pd0)
{
        int formatvec;
        struct sigcontext context;
        int err = 0;

        /* Always make any pending restarted system calls return -EINTR */
        current_thread_info()->restart_block.fn = do_no_restart_syscall;

        /* get previous context */
        if (copy_from_user(&context, usc, sizeof(context)))
                goto badframe;

        /* restore passed registers */
        regs->d1 = context.sc_d1;
        regs->a0 = context.sc_a0;
        regs->a1 = context.sc_a1;
        ((struct switch_stack *)regs - 1)->a5 = context.sc_a5;
        regs->sr = (regs->sr & 0xff00) | (context.sc_sr & 0xff);
        regs->pc = context.sc_pc;
        regs->orig_d0 = -1;             /* disable syscall checks */
        wrusp(context.sc_usp);
        formatvec = context.sc_formatvec;
        regs->format = formatvec >> 12;
        regs->vector = formatvec & 0xfff;

#ifdef CONFIG_FPU
        err = restore_fpu_state(&context);
#endif

        *pd0 = context.sc_d0;
        return err;

badframe:
        return 1;
}

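/*
 * Restore the full register set (including the callee-saved registers kept
 * in the switch_stack), the user stack pointer and the alternate signal
 * stack settings from an rt frame's ucontext.
 */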
static inline int
rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
                    struct ucontext __user *uc, int *pd0)
{
        int temp;
        greg_t __user *gregs = uc->uc_mcontext.gregs;
        unsigned long usp;
        int err;

        /* Always make any pending restarted system calls return -EINTR */
        current_thread_info()->restart_block.fn = do_no_restart_syscall;

        err = __get_user(temp, &uc->uc_mcontext.version);
        if (temp != MCONTEXT_VERSION)
                goto badframe;
        /* restore passed registers */
        err |= __get_user(regs->d0, &gregs[0]);
        err |= __get_user(regs->d1, &gregs[1]);
        err |= __get_user(regs->d2, &gregs[2]);
        err |= __get_user(regs->d3, &gregs[3]);
        err |= __get_user(regs->d4, &gregs[4]);
        err |= __get_user(regs->d5, &gregs[5]);
        err |= __get_user(sw->d6, &gregs[6]);
        err |= __get_user(sw->d7, &gregs[7]);
        err |= __get_user(regs->a0, &gregs[8]);
        err |= __get_user(regs->a1, &gregs[9]);
        err |= __get_user(regs->a2, &gregs[10]);
        err |= __get_user(sw->a3, &gregs[11]);
        err |= __get_user(sw->a4, &gregs[12]);
        err |= __get_user(sw->a5, &gregs[13]);
        err |= __get_user(sw->a6, &gregs[14]);
        err |= __get_user(usp, &gregs[15]);
        wrusp(usp);
        err |= __get_user(regs->pc, &gregs[16]);
        err |= __get_user(temp, &gregs[17]);
        regs->sr = (regs->sr & 0xff00) | (temp & 0xff);
        regs->orig_d0 = -1;             /* disable syscall checks */
        regs->format = temp >> 12;
        regs->vector = temp & 0xfff;

        if (do_sigaltstack(&uc->uc_stack, NULL, usp) == -EFAULT)
                goto badframe;

        *pd0 = regs->d0;
        return err;

badframe:
        return 1;
}

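/*
 * Handle the sigreturn() system call: pick the sigframe off the user stack,
 * restore the blocked signal mask and the saved register context, then hand
 * the original d0 back so the interrupted code resumes unchanged.
 */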
asmlinkage int do_sigreturn(unsigned long __unused)
{
        struct switch_stack *sw = (struct switch_stack *) &__unused;
        struct pt_regs *regs = (struct pt_regs *) (sw + 1);
        unsigned long usp = rdusp();
        struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
        sigset_t set;
        int d0;

        if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
                goto badframe;
        if (__get_user(set.sig[0], &frame->sc.sc_mask) ||
            (_NSIG_WORDS > 1 &&
             __copy_from_user(&set.sig[1], &frame->extramask,
                              sizeof(frame->extramask))))
                goto badframe;

        sigdelsetmask(&set, ~_BLOCKABLE);
        spin_lock_irq(&current->sighand->siglock);
        current->blocked = set;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);

        if (restore_sigcontext(regs, &frame->sc, frame + 1, &d0))
                goto badframe;
        return d0;

badframe:
        force_sig(SIGSEGV, current);
        return 0;
}

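/*
 * Handle rt_sigreturn(): same as do_sigreturn(), but the signal mask and
 * register context come from the ucontext embedded in the rt_sigframe.
 */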
asmlinkage int do_rt_sigreturn(unsigned long __unused)
{
        struct switch_stack *sw = (struct switch_stack *) &__unused;
        struct pt_regs *regs = (struct pt_regs *) (sw + 1);
        unsigned long usp = rdusp();
        struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4);
        sigset_t set;
        int d0;

        if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
                goto badframe;
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;

        sigdelsetmask(&set, ~_BLOCKABLE);
        spin_lock_irq(&current->sighand->siglock);
        current->blocked = set;
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);

        if (rt_restore_ucontext(regs, sw, &frame->uc, &d0))
                goto badframe;
        return d0;

badframe:
        force_sig(SIGSEGV, current);
        return 0;
}

#ifdef CONFIG_FPU
/*
 * Set up a signal frame.
 */

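/*
 * Capture the current FPU state into a plain sigcontext: an fsave frame plus
 * fp0/fp1 and the control registers (or the emulator's state when FPU_IS_EMU).
 */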
static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
{
        if (FPU_IS_EMU) {
                /* save registers */
                memcpy(sc->sc_fpcntl, current->thread.fpcntl, 12);
                memcpy(sc->sc_fpregs, current->thread.fp, 24);
                return;
        }

        __asm__ volatile (".chip 68k/68881\n\t"
                          "fsave %0\n\t"
                          ".chip 68k"
                          : : "m" (*sc->sc_fpstate) : "memory");

        if (sc->sc_fpstate[0]) {
                fpu_version = sc->sc_fpstate[0];
                __asm__ volatile (".chip 68k/68881\n\t"
                                  "fmovemx %%fp0-%%fp1,%0\n\t"
                                  "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
                                  ".chip 68k"
                                  : "=m" (*sc->sc_fpregs),
                                    "=m" (*sc->sc_fpcntl)
                                  : /* no inputs */
                                  : "memory");
        }
}

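/*
 * Capture the full FPU state (fp0-fp7 plus control registers and the fsave
 * frame) into an rt frame's ucontext.
 */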
static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
{
        unsigned char fpstate[FPCONTEXT_SIZE];
        int context_size = 0;
        int err = 0;

        if (FPU_IS_EMU) {
                /* save fpu control register */
                err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpcntl,
                                    current->thread.fpcntl, 12);
                /* save all other fpu registers */
                err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpregs,
                                    current->thread.fp, 96);
                return err;
        }

        __asm__ volatile (".chip 68k/68881\n\t"
                          "fsave %0\n\t"
                          ".chip 68k"
                          : : "m" (*fpstate) : "memory");

        err |= __put_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate);
        if (fpstate[0]) {
                fpregset_t fpregs;
                context_size = fpstate[1];
                fpu_version = fpstate[0];
                __asm__ volatile (".chip 68k/68881\n\t"
                                  "fmovemx %%fp0-%%fp7,%0\n\t"
                                  "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
                                  ".chip 68k"
                                  : "=m" (*fpregs.f_fpregs),
                                    "=m" (*fpregs.f_fpcntl)
                                  : /* no inputs */
                                  : "memory");
                err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs,
                                    sizeof(fpregs));
        }
        if (context_size)
                err |= copy_to_user((long __user *)&uc->uc_fpstate + 1, fpstate + 4,
                                    context_size);
        return err;
}

#endif

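/*
 * Fill in a sigcontext from the current register state so the handler's
 * stack frame records everything sigreturn needs to resume the task.
 */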
static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
                             unsigned long mask)
{
        sc->sc_mask = mask;
        sc->sc_usp = rdusp();
        sc->sc_d0 = regs->d0;
        sc->sc_d1 = regs->d1;
        sc->sc_a0 = regs->a0;
        sc->sc_a1 = regs->a1;
        sc->sc_a5 = ((struct switch_stack *)regs - 1)->a5;
        sc->sc_sr = regs->sr;
        sc->sc_pc = regs->pc;
        sc->sc_formatvec = regs->format << 12 | regs->vector;
#ifdef CONFIG_FPU
        save_fpu_state(sc, regs);
#endif
}

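/*
 * Write the complete register set (pt_regs plus the callee-saved registers
 * from the switch_stack) into the ucontext of an rt signal frame.
 */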
static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs)
{
        struct switch_stack *sw = (struct switch_stack *)regs - 1;
        greg_t __user *gregs = uc->uc_mcontext.gregs;
        int err = 0;

        err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
        err |= __put_user(regs->d0, &gregs[0]);
        err |= __put_user(regs->d1, &gregs[1]);
        err |= __put_user(regs->d2, &gregs[2]);
        err |= __put_user(regs->d3, &gregs[3]);
        err |= __put_user(regs->d4, &gregs[4]);
        err |= __put_user(regs->d5, &gregs[5]);
        err |= __put_user(sw->d6, &gregs[6]);
        err |= __put_user(sw->d7, &gregs[7]);
        err |= __put_user(regs->a0, &gregs[8]);
        err |= __put_user(regs->a1, &gregs[9]);
        err |= __put_user(regs->a2, &gregs[10]);
        err |= __put_user(sw->a3, &gregs[11]);
        err |= __put_user(sw->a4, &gregs[12]);
        err |= __put_user(sw->a5, &gregs[13]);
        err |= __put_user(sw->a6, &gregs[14]);
        err |= __put_user(rdusp(), &gregs[15]);
        err |= __put_user(regs->pc, &gregs[16]);
        err |= __put_user(regs->sr, &gregs[17]);
#ifdef CONFIG_FPU
        err |= rt_save_fpu_state(uc, regs);
#endif
        return err;
}

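/*
 * Pick where the signal frame goes: below the current user stack pointer,
 * or on the alternate signal stack if SA_ONSTACK is set and we are not
 * already running on it.  The result is rounded down to an 8-byte boundary.
 */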
static inline void __user *
get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
{
        unsigned long usp;

        /* Default to using normal stack. */
        usp = rdusp();

        /* This is the X/Open sanctioned signal stack switching. */
        if (ka->sa.sa_flags & SA_ONSTACK) {
                if (!sas_ss_flags(usp))
                        usp = current->sas_ss_sp + current->sas_ss_size;
        }
        return (void __user *)((usp - frame_size) & -8UL);
}

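/*
 * Build a classic (non-RT) signal frame on the user stack and point the
 * saved pc at the handler; the pretcode slot makes the handler return
 * through ret_from_user_signal.
 */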
static int setup_frame(int sig, struct k_sigaction *ka,
                       sigset_t *set, struct pt_regs *regs)
{
        struct sigframe __user *frame;
        struct sigcontext context;
        int err = 0;

        frame = get_sigframe(ka, regs, sizeof(*frame));

        err |= __put_user((current_thread_info()->exec_domain
                           && current_thread_info()->exec_domain->signal_invmap
                           && sig < 32
                           ? current_thread_info()->exec_domain->signal_invmap[sig]
                           : sig),
                          &frame->sig);

        err |= __put_user(regs->vector, &frame->code);
        err |= __put_user(&frame->sc, &frame->psc);

        if (_NSIG_WORDS > 1)
                err |= copy_to_user(frame->extramask, &set->sig[1],
                                    sizeof(frame->extramask));

        setup_sigcontext(&context, regs, set->sig[0]);
        err |= copy_to_user(&frame->sc, &context, sizeof(context));

        /* Set up to return from userspace. */
        err |= __put_user((void *) ret_from_user_signal, &frame->pretcode);

        if (err)
                goto give_sigsegv;

        /* Set up registers for signal handler */
        wrusp((unsigned long) frame);
        regs->pc = (unsigned long) ka->sa.sa_handler;
        ((struct switch_stack *)regs - 1)->a5 = current->mm->start_data;
        regs->format = 0x4;     /* set format byte to make stack appear modulo 4
                                   which it will be when doing the rte */

adjust_stack:
        /* Prepare to skip over the extra stuff in the exception frame. */
        if (regs->stkadj) {
                struct pt_regs *tregs =
                        (struct pt_regs *)((ulong)regs + regs->stkadj);
#if defined(DEBUG)
                printk(KERN_DEBUG "Performing stackadjust=%04x\n", regs->stkadj);
#endif
                /* This must be copied with decreasing addresses to
                   handle overlaps. */
                tregs->vector = 0;
                tregs->format = 0;
                tregs->pc = regs->pc;
                tregs->sr = regs->sr;
        }
        return err;

give_sigsegv:
        force_sigsegv(sig, current);
        goto adjust_stack;
}

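/*
 * Build an RT signal frame: like setup_frame(), but with siginfo and a full
 * ucontext, returning through ret_from_user_rt_signal.
 */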
static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
                          sigset_t *set, struct pt_regs *regs)
{
        struct rt_sigframe __user *frame;
        int err = 0;

        frame = get_sigframe(ka, regs, sizeof(*frame));

        err |= __put_user((current_thread_info()->exec_domain
                           && current_thread_info()->exec_domain->signal_invmap
                           && sig < 32
                           ? current_thread_info()->exec_domain->signal_invmap[sig]
                           : sig),
                          &frame->sig);
        err |= __put_user(&frame->info, &frame->pinfo);
        err |= __put_user(&frame->uc, &frame->puc);
        err |= copy_siginfo_to_user(&frame->info, info);

        /* Create the ucontext. */
        err |= __put_user(0, &frame->uc.uc_flags);
        err |= __put_user(NULL, &frame->uc.uc_link);
        err |= __put_user((void __user *)current->sas_ss_sp,
                          &frame->uc.uc_stack.ss_sp);
        err |= __put_user(sas_ss_flags(rdusp()),
                          &frame->uc.uc_stack.ss_flags);
        err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
        err |= rt_setup_ucontext(&frame->uc, regs);
        err |= copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

        /* Set up to return from userspace. */
        err |= __put_user((void *) ret_from_user_rt_signal, &frame->pretcode);

        if (err)
                goto give_sigsegv;

        /* Set up registers for signal handler */
        wrusp((unsigned long) frame);
        regs->pc = (unsigned long) ka->sa.sa_handler;
        ((struct switch_stack *)regs - 1)->a5 = current->mm->start_data;
        regs->format = 0x4;     /* set format byte to make stack appear modulo 4
                                   which it will be when doing the rte */

adjust_stack:
        /* Prepare to skip over the extra stuff in the exception frame. */
        if (regs->stkadj) {
                struct pt_regs *tregs =
                        (struct pt_regs *)((ulong)regs + regs->stkadj);
#if defined(DEBUG)
                printk(KERN_DEBUG "Performing stackadjust=%04x\n", regs->stkadj);
#endif
                /* This must be copied with decreasing addresses to
                   handle overlaps. */
                tregs->vector = 0;
                tregs->format = 0;
                tregs->pc = regs->pc;
                tregs->sr = regs->sr;
        }
        return err;

give_sigsegv:
        force_sigsegv(sig, current);
        goto adjust_stack;
}

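/*
 * Decide how an interrupted system call should be resumed.  Restarting is
 * done by restoring the original d0 and backing the pc up over the two-byte
 * trap instruction; -ERESTART_RESTARTBLOCK restarts via sys_restart_syscall.
 */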
static inline void
handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
{
        switch (regs->d0) {
        case -ERESTARTNOHAND:
                if (!has_handler)
                        goto do_restart;
                regs->d0 = -EINTR;
                break;

        case -ERESTART_RESTARTBLOCK:
                if (!has_handler) {
                        regs->d0 = __NR_restart_syscall;
                        regs->pc -= 2;
                        break;
                }
                regs->d0 = -EINTR;
                break;

        case -ERESTARTSYS:
                if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
                        regs->d0 = -EINTR;
                        break;
                }
                /* fallthrough */
        case -ERESTARTNOINTR:
        do_restart:
                regs->d0 = regs->orig_d0;
                regs->pc -= 2;
                break;
        }
}

/*
 * OK, we're invoking a handler
 */
static void
handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
              sigset_t *oldset, struct pt_regs *regs)
{
        int err;

        /* are we from a system call? */
        if (regs->orig_d0 >= 0)
                /* If so, check system call restarting.. */
                handle_restart(regs, ka, 1);

        /* set up the stack frame */
        if (ka->sa.sa_flags & SA_SIGINFO)
                err = setup_rt_frame(sig, ka, info, oldset, regs);
        else
                err = setup_frame(sig, ka, oldset, regs);

        if (err)
                return;

        spin_lock_irq(&current->sighand->siglock);
        sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
        if (!(ka->sa.sa_flags & SA_NODEFER))
                sigaddset(&current->blocked, sig);
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);

        clear_thread_flag(TIF_RESTORE_SIGMASK);
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle.  Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 */
asmlinkage void do_signal(struct pt_regs *regs)
{
        struct k_sigaction ka;
        siginfo_t info;
        int signr;
        sigset_t *oldset;

        /*
         * We want the common case to go fast, which
         * is why we may in certain cases get here from
         * kernel mode. Just return without doing anything
         * if so.
         */
        if (!user_mode(regs))
                return;

        if (test_thread_flag(TIF_RESTORE_SIGMASK))
                oldset = &current->saved_sigmask;
        else
                oldset = &current->blocked;

        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
                /* Whee!  Actually deliver the signal.  */
                handle_signal(signr, &ka, &info, oldset, regs);
                return;
        }

        /* Did we come from a system call? */
        if (regs->orig_d0 >= 0) {
                /* Restart the system call - no handlers present */
                handle_restart(regs, NULL, 0);
        }

        /* If there's no signal to deliver, we just restore the saved mask. */
        if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
                clear_thread_flag(TIF_RESTORE_SIGMASK);
                sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
        }
}