1 /*
2 * linux/arch/arm/kernel/signal.c
3 *
4 * Copyright (C) 1995-2009 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10 #include <linux/errno.h>
11 #include <linux/random.h>
12 #include <linux/signal.h>
13 #include <linux/personality.h>
14 #include <linux/uaccess.h>
15 #include <linux/tracehook.h>
16 #include <linux/uprobes.h>
17 #include <linux/syscalls.h>
18
19 #include <asm/elf.h>
20 #include <asm/cacheflush.h>
21 #include <asm/traps.h>
22 #include <asm/ucontext.h>
23 #include <asm/unistd.h>
24 #include <asm/vfp.h>
25
26 extern const unsigned long sigreturn_codes[7];
27
28 static unsigned long signal_return_offset;
29
30 #ifdef CONFIG_CRUNCH
/*
 * Save the current task's Crunch coprocessor state into the user-space
 * signal frame.  The state is staged in an on-stack buffer so it can be
 * placed on a 64-bit boundary before being copied out in one go.
 *
 * Returns 0 on success, non-zero if the user copy faults.
 */
static int preserve_crunch_context(struct crunch_sigframe __user *frame)
{
	char buf[sizeof(*frame) + 8];
	struct crunch_sigframe *ctx;

	/* the crunch context must be 64 bit aligned */
	ctx = (struct crunch_sigframe *)((unsigned long)(buf + 8) & ~7);

	ctx->magic = CRUNCH_MAGIC;
	ctx->size = CRUNCH_STORAGE_SIZE;
	crunch_task_copy(current_thread_info(), &ctx->storage);

	return __copy_to_user(frame, ctx, sizeof(*frame));
}
43
/*
 * Restore the Crunch coprocessor state from the save record at *auxp.
 * On success the cursor is advanced past the consumed record and 0 is
 * returned; -1 on fault or if the record header is not a valid Crunch
 * block.
 */
static int restore_crunch_context(char __user **auxp)
{
	struct crunch_sigframe __user *frame =
		(struct crunch_sigframe __user *)*auxp;
	char kbuf[sizeof(*frame) + 8];
	struct crunch_sigframe *kframe;

	/* the crunch context must be 64 bit aligned */
	kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	if (__copy_from_user(kframe, frame, sizeof(*frame)))
		return -1;
	/* reject anything that does not carry the expected header */
	if (kframe->magic != CRUNCH_MAGIC ||
	    kframe->size != CRUNCH_STORAGE_SIZE)
		return -1;
	/* skip past the record we just consumed */
	*auxp += CRUNCH_STORAGE_SIZE;
	crunch_task_restore(current_thread_info(), &kframe->storage);
	return 0;
}
62 #endif
63
64 #ifdef CONFIG_IWMMXT
65
/*
 * Save the iWMMXt coprocessor state into the user-space signal frame.
 * When the task is not using iWMMXt, a same-size dummy block is written
 * instead so the frame layout matches what older kernels produced.
 *
 * Returns 0 on success, non-zero if the user copy faults.
 */
static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame)
{
	char kbuf[sizeof(*frame) + 8];
	struct iwmmxt_sigframe *kframe;
	int err = 0;

	/* the iWMMXt context must be 64 bit aligned */
	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);

	if (test_thread_flag(TIF_USING_IWMMXT)) {
		kframe->magic = IWMMXT_MAGIC;
		kframe->size = IWMMXT_STORAGE_SIZE;
		iwmmxt_task_copy(current_thread_info(), &kframe->storage);
	} else {
		/*
		 * For bug-compatibility with older kernels, some space
		 * has to be reserved for iWMMXt even if it's not used.
		 * Set the magic and size appropriately so that properly
		 * written userspace can skip it reliably:
		 */
		*kframe = (struct iwmmxt_sigframe) {
			.magic = DUMMY_MAGIC,
			.size = IWMMXT_STORAGE_SIZE,
		};
	}

	err = __copy_to_user(frame, kframe, sizeof(*kframe));

	return err;
}
96
/*
 * Restore (or skip) the iWMMXt record at *auxp, the counterpart of
 * preserve_iwmmxt_context() above.  Returns 0 on success (including
 * the "record not for us" case for non-iWMMXt threads), -1 if the
 * record is malformed or the copy faults.
 */
static int restore_iwmmxt_context(char __user **auxp)
{
	struct iwmmxt_sigframe __user *frame =
		(struct iwmmxt_sigframe __user *)*auxp;
	char kbuf[sizeof(*frame) + 8];
	struct iwmmxt_sigframe *kframe;

	/* the iWMMXt context must be 64 bit aligned */
	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	if (__copy_from_user(kframe, frame, sizeof(*frame)))
		return -1;

	/*
	 * For non-iWMMXt threads: a single iwmmxt_sigframe-sized dummy
	 * block is discarded for compatibility with setup_sigframe() if
	 * present, but we don't mandate its presence. If some other
	 * magic is here, it's not for us:
	 */
	if (!test_thread_flag(TIF_USING_IWMMXT) &&
	    kframe->magic != DUMMY_MAGIC)
		return 0;

	/* both real and dummy blocks must advertise the full size */
	if (kframe->size != IWMMXT_STORAGE_SIZE)
		return -1;

	if (test_thread_flag(TIF_USING_IWMMXT)) {
		if (kframe->magic != IWMMXT_MAGIC)
			return -1;

		iwmmxt_task_restore(current_thread_info(), &kframe->storage);
	}

	/* consume the record (real or dummy) */
	*auxp += IWMMXT_STORAGE_SIZE;
	return 0;
}
132
133 #endif
134
135 #ifdef CONFIG_VFP
136
/*
 * Write the current VFP state into the user signal frame.  A kernel
 * copy is filled in first so a single __copy_to_user() covers the
 * whole record.  Returns 0 on success, non-zero on fault or if the
 * hardware state could not be preserved.
 */
static int preserve_vfp_context(struct vfp_sigframe __user *frame)
{
	struct vfp_sigframe staged;
	int ret;

	memset(&staged, 0, sizeof(staged));
	staged.magic = VFP_MAGIC;
	staged.size = VFP_STORAGE_SIZE;

	ret = vfp_preserve_user_clear_hwstate(&staged.ufp, &staged.ufp_exc);
	if (ret)
		return ret;

	return __copy_to_user(frame, &staged, sizeof(staged));
}
152
/*
 * Restore the VFP state from the save record at *auxp.  The cursor is
 * advanced past the record only once its header has been validated.
 * Returns 0 on success, a negative error on fault or bad record.
 */
static int restore_vfp_context(char __user **auxp)
{
	struct vfp_sigframe staged;
	int ret;

	ret = __copy_from_user(&staged, *auxp, sizeof(staged));
	if (ret)
		return ret;

	/* reject anything that is not a correctly-sized VFP record */
	if (staged.magic != VFP_MAGIC || staged.size != VFP_STORAGE_SIZE)
		return -EINVAL;

	*auxp += sizeof(staged);
	return vfp_restore_user_hwstate(&staged.ufp, &staged.ufp_exc);
}
168
169 #endif
170
171 /*
172 * Do a signal return; undo the signal stack. These are aligned to 64-bit.
173 */
struct sigframe {
	struct ucontext uc;		/* saved context, incl. sigmask and uc_regspace */
	unsigned long retcode[2];	/* on-stack copy of the sigreturn trampoline */
};

struct rt_sigframe {
	struct siginfo info;		/* siginfo passed to SA_SIGINFO handlers */
	struct sigframe sig;		/* common part, shared with the non-RT path */
};
183
/*
 * Unwind one signal frame: restore the blocked signal mask, the integer
 * register file, and any coprocessor (Crunch/iWMMXt/VFP) state stacked
 * by setup_sigframe().  Returns 0 on success, non-zero if any part of
 * the frame is inaccessible or invalid.
 */
static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
{
	struct sigcontext context;
	char __user *aux;
	sigset_t set;
	int err;

	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0)
		set_current_blocked(&set);

	err |= __copy_from_user(&context, &sf->uc.uc_mcontext, sizeof(context));
	if (err == 0) {
		/* copy back the register file saved at signal delivery */
		regs->ARM_r0 = context.arm_r0;
		regs->ARM_r1 = context.arm_r1;
		regs->ARM_r2 = context.arm_r2;
		regs->ARM_r3 = context.arm_r3;
		regs->ARM_r4 = context.arm_r4;
		regs->ARM_r5 = context.arm_r5;
		regs->ARM_r6 = context.arm_r6;
		regs->ARM_r7 = context.arm_r7;
		regs->ARM_r8 = context.arm_r8;
		regs->ARM_r9 = context.arm_r9;
		regs->ARM_r10 = context.arm_r10;
		regs->ARM_fp = context.arm_fp;
		regs->ARM_ip = context.arm_ip;
		regs->ARM_sp = context.arm_sp;
		regs->ARM_lr = context.arm_lr;
		regs->ARM_pc = context.arm_pc;
		regs->ARM_cpsr = context.arm_cpsr;
	}

	/* refuse a register state that would be unsafe to resume with */
	err |= !valid_user_regs(regs);

	/* coprocessor records follow in uc_regspace, in this fixed order */
	aux = (char __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
	if (err == 0)
		err |= restore_crunch_context(&aux);
#endif
#ifdef CONFIG_IWMMXT
	if (err == 0)
		err |= restore_iwmmxt_context(&aux);
#endif
#ifdef CONFIG_VFP
	if (err == 0)
		err |= restore_vfp_context(&aux);
#endif

	return err;
}
234
sys_sigreturn(struct pt_regs * regs)235 asmlinkage int sys_sigreturn(struct pt_regs *regs)
236 {
237 struct sigframe __user *frame;
238
239 /* Always make any pending restarted system calls return -EINTR */
240 current->restart_block.fn = do_no_restart_syscall;
241
242 /*
243 * Since we stacked the signal on a 64-bit boundary,
244 * then 'sp' should be word aligned here. If it's
245 * not, then the user is trying to mess with us.
246 */
247 if (regs->ARM_sp & 7)
248 goto badframe;
249
250 frame = (struct sigframe __user *)regs->ARM_sp;
251
252 if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
253 goto badframe;
254
255 if (restore_sigframe(regs, frame))
256 goto badframe;
257
258 return regs->ARM_r0;
259
260 badframe:
261 force_sig(SIGSEGV, current);
262 return 0;
263 }
264
sys_rt_sigreturn(struct pt_regs * regs)265 asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
266 {
267 struct rt_sigframe __user *frame;
268
269 /* Always make any pending restarted system calls return -EINTR */
270 current->restart_block.fn = do_no_restart_syscall;
271
272 /*
273 * Since we stacked the signal on a 64-bit boundary,
274 * then 'sp' should be word aligned here. If it's
275 * not, then the user is trying to mess with us.
276 */
277 if (regs->ARM_sp & 7)
278 goto badframe;
279
280 frame = (struct rt_sigframe __user *)regs->ARM_sp;
281
282 if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
283 goto badframe;
284
285 if (restore_sigframe(regs, &frame->sig))
286 goto badframe;
287
288 if (restore_altstack(&frame->sig.uc.uc_stack))
289 goto badframe;
290
291 return regs->ARM_r0;
292
293 badframe:
294 force_sig(SIGSEGV, current);
295 return 0;
296 }
297
/*
 * Populate the common part of a (rt_)sigframe: the CPU register
 * context, the blocked signal mask, and the coprocessor save area
 * that follows in uc_regspace.  Returns 0 on success, non-zero if
 * any user-space copy fails.
 */
static int
setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
{
	struct aux_sigframe __user *aux;
	struct sigcontext context;
	int err = 0;

	/* snapshot the interrupted register state into a kernel copy */
	context = (struct sigcontext) {
		.arm_r0 = regs->ARM_r0,
		.arm_r1 = regs->ARM_r1,
		.arm_r2 = regs->ARM_r2,
		.arm_r3 = regs->ARM_r3,
		.arm_r4 = regs->ARM_r4,
		.arm_r5 = regs->ARM_r5,
		.arm_r6 = regs->ARM_r6,
		.arm_r7 = regs->ARM_r7,
		.arm_r8 = regs->ARM_r8,
		.arm_r9 = regs->ARM_r9,
		.arm_r10 = regs->ARM_r10,
		.arm_fp = regs->ARM_fp,
		.arm_ip = regs->ARM_ip,
		.arm_sp = regs->ARM_sp,
		.arm_lr = regs->ARM_lr,
		.arm_pc = regs->ARM_pc,
		.arm_cpsr = regs->ARM_cpsr,

		/* fault details from the thread, for the handler's benefit */
		.trap_no = current->thread.trap_no,
		.error_code = current->thread.error_code,
		.fault_address = current->thread.address,
		.oldmask = set->sig[0],
	};

	err |= __copy_to_user(&sf->uc.uc_mcontext, &context, sizeof(context));

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	/* coprocessor records go into uc_regspace, in this fixed order */
	aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
	if (err == 0)
		err |= preserve_crunch_context(&aux->crunch);
#endif
#ifdef CONFIG_IWMMXT
	if (err == 0)
		err |= preserve_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
	if (err == 0)
		err |= preserve_vfp_context(&aux->vfp);
#endif
	/* terminate the record list so userspace can find the end */
	err |= __put_user(0, &aux->end_magic);

	return err;
}
351
352 static inline void __user *
get_sigframe(struct ksignal * ksig,struct pt_regs * regs,int framesize)353 get_sigframe(struct ksignal *ksig, struct pt_regs *regs, int framesize)
354 {
355 unsigned long sp = sigsp(regs->ARM_sp, ksig);
356 void __user *frame;
357
358 /*
359 * ATPCS B01 mandates 8-byte alignment
360 */
361 frame = (void __user *)((sp - framesize) & ~7);
362
363 /*
364 * Check that we can actually write to the signal frame.
365 */
366 if (!access_ok(VERIFY_WRITE, frame, framesize))
367 frame = NULL;
368
369 return frame;
370 }
371
372 static int
setup_return(struct pt_regs * regs,struct ksignal * ksig,unsigned long __user * rc,void __user * frame)373 setup_return(struct pt_regs *regs, struct ksignal *ksig,
374 unsigned long __user *rc, void __user *frame)
375 {
376 unsigned long handler = (unsigned long)ksig->ka.sa.sa_handler;
377 unsigned long retcode;
378 int thumb = 0;
379 unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);
380
381 cpsr |= PSR_ENDSTATE;
382
383 /*
384 * Maybe we need to deliver a 32-bit signal to a 26-bit task.
385 */
386 if (ksig->ka.sa.sa_flags & SA_THIRTYTWO)
387 cpsr = (cpsr & ~MODE_MASK) | USR_MODE;
388
389 #ifdef CONFIG_ARM_THUMB
390 if (elf_hwcap & HWCAP_THUMB) {
391 /*
392 * The LSB of the handler determines if we're going to
393 * be using THUMB or ARM mode for this signal handler.
394 */
395 thumb = handler & 1;
396
397 /*
398 * Clear the If-Then Thumb-2 execution state. ARM spec
399 * requires this to be all 000s in ARM mode. Snapdragon
400 * S4/Krait misbehaves on a Thumb=>ARM signal transition
401 * without this.
402 *
403 * We must do this whenever we are running on a Thumb-2
404 * capable CPU, which includes ARMv6T2. However, we elect
405 * to always do this to simplify the code; this field is
406 * marked UNK/SBZP for older architectures.
407 */
408 cpsr &= ~PSR_IT_MASK;
409
410 if (thumb) {
411 cpsr |= PSR_T_BIT;
412 } else
413 cpsr &= ~PSR_T_BIT;
414 }
415 #endif
416
417 if (ksig->ka.sa.sa_flags & SA_RESTORER) {
418 retcode = (unsigned long)ksig->ka.sa.sa_restorer;
419 } else {
420 unsigned int idx = thumb << 1;
421
422 if (ksig->ka.sa.sa_flags & SA_SIGINFO)
423 idx += 3;
424
425 /*
426 * Put the sigreturn code on the stack no matter which return
427 * mechanism we use in order to remain ABI compliant
428 */
429 if (__put_user(sigreturn_codes[idx], rc) ||
430 __put_user(sigreturn_codes[idx+1], rc+1))
431 return 1;
432
433 #ifdef CONFIG_MMU
434 if (cpsr & MODE32_BIT) {
435 struct mm_struct *mm = current->mm;
436
437 /*
438 * 32-bit code can use the signal return page
439 * except when the MPU has protected the vectors
440 * page from PL0
441 */
442 retcode = mm->context.sigpage + signal_return_offset +
443 (idx << 2) + thumb;
444 } else
445 #endif
446 {
447 /*
448 * Ensure that the instruction cache sees
449 * the return code written onto the stack.
450 */
451 flush_icache_range((unsigned long)rc,
452 (unsigned long)(rc + 2));
453
454 retcode = ((unsigned long)rc) + thumb;
455 }
456 }
457
458 regs->ARM_r0 = ksig->sig;
459 regs->ARM_sp = (unsigned long)frame;
460 regs->ARM_lr = retcode;
461 regs->ARM_pc = handler;
462 regs->ARM_cpsr = cpsr;
463
464 return 0;
465 }
466
467 static int
setup_frame(struct ksignal * ksig,sigset_t * set,struct pt_regs * regs)468 setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
469 {
470 struct sigframe __user *frame = get_sigframe(ksig, regs, sizeof(*frame));
471 int err = 0;
472
473 if (!frame)
474 return 1;
475
476 /*
477 * Set uc.uc_flags to a value which sc.trap_no would never have.
478 */
479 err = __put_user(0x5ac3c35a, &frame->uc.uc_flags);
480
481 err |= setup_sigframe(frame, regs, set);
482 if (err == 0)
483 err = setup_return(regs, ksig, frame->retcode, frame);
484
485 return err;
486 }
487
488 static int
setup_rt_frame(struct ksignal * ksig,sigset_t * set,struct pt_regs * regs)489 setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
490 {
491 struct rt_sigframe __user *frame = get_sigframe(ksig, regs, sizeof(*frame));
492 int err = 0;
493
494 if (!frame)
495 return 1;
496
497 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
498
499 err |= __put_user(0, &frame->sig.uc.uc_flags);
500 err |= __put_user(NULL, &frame->sig.uc.uc_link);
501
502 err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp);
503 err |= setup_sigframe(&frame->sig, regs, set);
504 if (err == 0)
505 err = setup_return(regs, ksig, frame->sig.retcode, frame);
506
507 if (err == 0) {
508 /*
509 * For realtime signals we must also set the second and third
510 * arguments for the signal handler.
511 * -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
512 */
513 regs->ARM_r1 = (unsigned long)&frame->info;
514 regs->ARM_r2 = (unsigned long)&frame->sig.uc;
515 }
516
517 return err;
518 }
519
520 /*
521 * OK, we're invoking a handler
522 */
handle_signal(struct ksignal * ksig,struct pt_regs * regs)523 static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
524 {
525 sigset_t *oldset = sigmask_to_save();
526 int ret;
527
528 /*
529 * Set up the stack frame
530 */
531 if (ksig->ka.sa.sa_flags & SA_SIGINFO)
532 ret = setup_rt_frame(ksig, oldset, regs);
533 else
534 ret = setup_frame(ksig, oldset, regs);
535
536 /*
537 * Check that the resulting registers are actually sane.
538 */
539 ret |= !valid_user_regs(regs);
540
541 signal_setup_done(ret, ksig, 0);
542 }
543
544 /*
545 * Note that 'init' is a special process: it doesn't get signals it doesn't
546 * want to handle. Thus you cannot kill init even with a SIGKILL even by
547 * mistake.
548 *
549 * Note that we go through the signals twice: once to check the signals that
550 * the kernel can handle, and then we build all the user-level signal handling
551 * stack-frames in one go after that.
552 */
/*
 * Deliver one pending signal, handling syscall-restart bookkeeping.
 *
 * Returns 0 normally.  When no handler is installed and the syscall
 * must be restarted without returning to userspace, returns the
 * non-zero restart disposition computed below: 1 for the plain
 * restart cases, -1 for -ERESTART_RESTARTBLOCK.
 */
static int do_signal(struct pt_regs *regs, int syscall)
{
	unsigned int retval = 0, continue_addr = 0, restart_addr = 0;
	struct ksignal ksig;
	int restart = 0;

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		continue_addr = regs->ARM_pc;
		restart_addr = continue_addr - (thumb_mode(regs) ? 2 : 4);
		retval = regs->ARM_r0;

		/*
		 * Prepare for system call restart. We do this here so that a
		 * debugger will see the already changed PSW.
		 */
		switch (retval) {
		case -ERESTART_RESTARTBLOCK:
			restart -= 2;
			/* fall through */
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
			restart++;
			regs->ARM_r0 = regs->ARM_ORIG_r0;
			regs->ARM_pc = restart_addr;
			break;
		}
	}

	/*
	 * Get the signal to deliver. When running under ptrace, at this
	 * point the debugger may change all our registers ...
	 */
	/*
	 * Depending on the signal settings we may need to revert the
	 * decision to restart the system call. But skip this if a
	 * debugger has chosen to restart at a different PC.
	 */
	if (get_signal(&ksig)) {
		/* handler */
		if (unlikely(restart) && regs->ARM_pc == restart_addr) {
			if (retval == -ERESTARTNOHAND ||
			    retval == -ERESTART_RESTARTBLOCK
			    || (retval == -ERESTARTSYS
				&& !(ksig.ka.sa.sa_flags & SA_RESTART))) {
				regs->ARM_r0 = -EINTR;
				regs->ARM_pc = continue_addr;
			}
		}
		handle_signal(&ksig, regs);
	} else {
		/* no handler */
		restore_saved_sigmask();
		if (unlikely(restart) && regs->ARM_pc == restart_addr) {
			regs->ARM_pc = continue_addr;
			return restart;
		}
	}
	return 0;
}
615
/*
 * Work loop run on the return-to-userspace path while TIF work flags
 * are set.  Entered with IRQs off.  Returns non-zero only when a
 * syscall must be restarted without leaving the kernel (the value is
 * the restart disposition from do_signal()); 0 otherwise.
 */
asmlinkage int
do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
	/*
	 * The assembly code enters us with IRQs off, but it hasn't
	 * informed the tracing code of that for efficiency reasons.
	 * Update the trace code with the current status.
	 */
	trace_hardirqs_off();
	do {
		if (likely(thread_flags & _TIF_NEED_RESCHED)) {
			schedule();
		} else {
			/* signal work is only meaningful for user mode */
			if (unlikely(!user_mode(regs)))
				return 0;
			local_irq_enable();
			if (thread_flags & _TIF_SIGPENDING) {
				int restart = do_signal(regs, syscall);
				if (unlikely(restart)) {
					/*
					 * Restart without handlers.
					 * Deal with it without leaving
					 * the kernel space.
					 */
					return restart;
				}
				/* only the first pass counts as a syscall */
				syscall = 0;
			} else if (thread_flags & _TIF_UPROBE) {
				uprobe_notify_resume(regs);
			} else {
				clear_thread_flag(TIF_NOTIFY_RESUME);
				tracehook_notify_resume(regs);
			}
		}
		/* re-check the flags with IRQs off before looping/exiting */
		local_irq_disable();
		thread_flags = current_thread_info()->flags;
	} while (thread_flags & _TIF_WORK_MASK);
	return 0;
}
655
get_signal_page(void)656 struct page *get_signal_page(void)
657 {
658 unsigned long ptr;
659 unsigned offset;
660 struct page *page;
661 void *addr;
662
663 page = alloc_pages(GFP_KERNEL, 0);
664
665 if (!page)
666 return NULL;
667
668 addr = page_address(page);
669
670 /* Give the signal return code some randomness */
671 offset = 0x200 + (get_random_int() & 0x7fc);
672 signal_return_offset = offset;
673
674 /*
675 * Copy signal return handlers into the vector page, and
676 * set sigreturn to be a pointer to these.
677 */
678 memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
679
680 ptr = (unsigned long)addr + offset;
681 flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
682
683 return page;
684 }
685
/*
 * Called from the entry assembly when the address-limit check fails;
 * defer to the generic addr_limit_user_check() for handling.
 */
asmlinkage void addr_limit_check_failed(void)
{
	addr_limit_user_check();
}
691