/*
 * linux/kernel/sys.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/kmod.h>
#include <linux/perf_event.h>
#include <linux/resource.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/getcpu.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/seccomp.h>
#include <linux/cpu.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/fs_struct.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/gfp.h>
#include <linux/syscore_ops.h>
#include <linux/version.h>
#include <linux/ctype.h>
#include <linux/mempolicy.h>
#include <linux/sched.h>

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>
#include <linux/user_namespace.h>
#include <linux/binfmts.h>

#include <linux/rcupdate.h>
#include <linux/uidgid.h>
#include <linux/cred.h>

#include <linux/kmsg_dump.h>
/* Move somewhere else to avoid recompiling? */
#include <generated/utsrelease.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/unistd.h>

#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_ENDIAN
# define GET_ENDIAN(a,b)	(-EINVAL)
#endif
#ifndef SET_ENDIAN
# define SET_ENDIAN(a,b)	(-EINVAL)
#endif
#ifndef GET_TSC_CTL
# define GET_TSC_CTL(a)		(-EINVAL)
#endif
#ifndef SET_TSC_CTL
# define SET_TSC_CTL(a)		(-EINVAL)
#endif

#ifndef GET_FP_MODE
# define GET_FP_MODE(a)		(-EINVAL)
#endif
#ifndef SET_FP_MODE
# define SET_FP_MODE(a,b)	(-EINVAL)
#endif
/*
 * this is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);

/*
 * the same as above, but for filesystems which can only store a 16-bit
 * UID and GID. as such, this is needed on all architectures
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);

/*
 * this indicates whether you can reboot with ctrl-alt-del: the default is yes
 */

int C_A_D = 1;
struct pid *cad_pid;
EXPORT_SYMBOL(cad_pid);

/*
 * If set, this is used for preparing the system to power off.
 */

void (*pm_power_off_prepare)(void);

/*
 * Returns true if current's euid is same as p's uid or euid,
 * or has CAP_SYS_NICE to p's user_ns.
 *
 * Called with rcu_read_lock, creds are safe
 */
static bool set_one_prio_perm(struct task_struct *p)
{
	const struct cred *cred = current_cred(), *pcred = __task_cred(p);

	if (uid_eq(pcred->uid, cred->euid) ||
	    uid_eq(pcred->euid, cred->euid))
		return true;
	if (ns_capable(pcred->user_ns, CAP_SYS_NICE))
		return true;
	return false;
}

/*
 * set the priority of a task
 * - the caller must hold the RCU read lock
 */
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
	int no_nice;

	if (!set_one_prio_perm(p)) {
		error = -EPERM;
		goto out;
	}
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
		error = -EACCES;
		goto out;
	}
	no_nice = security_task_setnice(p, niceval);
	if (no_nice) {
		error = no_nice;
		goto out;
	}
	if (error == -ESRCH)
		error = 0;
	set_user_nice(p, niceval);
out:
	return error;
}

SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	int error = -EINVAL;
	struct pid *pgrp;
	kuid_t uid;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		goto out;

	/* normalize: avoid signed division (rounding problems) */
	error = -ESRCH;
	if (niceval < -20)
		niceval = -20;
	if (niceval > 19)
		niceval = 19;

	rcu_read_lock();
	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p)
			error = set_one_prio(p, niceval, error);
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			error = set_one_prio(p, niceval, error);
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		uid = make_kuid(cred->user_ns, who);
		user = cred->user;
		if (!who)
			uid = cred->uid;
		else if (!uid_eq(uid, cred->uid) &&
			 !(user = find_user(uid)))
			goto out_unlock;	/* No processes for this user */

		do_each_thread(g, p) {
			if (uid_eq(task_uid(p), uid))
				error = set_one_prio(p, niceval, error);
		} while_each_thread(g, p);
		if (!uid_eq(uid, cred->uid))
			free_uid(user);		/* For find_user() */
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
	rcu_read_unlock();
out:
	return error;
}

/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
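/*
 * Worked example of the offset arithmetic above (a reader's note; the
 * glibc getpriority() wrapper is believed to undo the conversion, so
 * only raw syscall users see it):
 *
 *	long ret = syscall(SYS_getpriority, PRIO_PROCESS, 0);
 *	int nice = 20 - ret;
 *
 * A task at nice -20 comes back as 20 - (-20) = 40, and a task at
 * nice 19 comes back as 20 - 19 = 1, so errors stay negative.
 */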
SYSCALL_DEFINE2(getpriority, int, which, int, who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	long niceval, retval = -ESRCH;
	struct pid *pgrp;
	kuid_t uid;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		return -EINVAL;

	rcu_read_lock();
	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p) {
			niceval = 20 - task_nice(p);
			if (niceval > retval)
				retval = niceval;
		}
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			niceval = 20 - task_nice(p);
			if (niceval > retval)
				retval = niceval;
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		uid = make_kuid(cred->user_ns, who);
		user = cred->user;
		if (!who)
			uid = cred->uid;
		else if (!uid_eq(uid, cred->uid) &&
			 !(user = find_user(uid)))
			goto out_unlock;	/* No processes for this user */

		do_each_thread(g, p) {
			if (uid_eq(task_uid(p), uid)) {
				niceval = 20 - task_nice(p);
				if (niceval > retval)
					retval = niceval;
			}
		} while_each_thread(g, p);
		if (!uid_eq(uid, cred->uid))
			free_uid(user);		/* for find_user() */
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
	rcu_read_unlock();

	return retval;
}

/**
 * emergency_restart - reboot the system
 *
 * Without shutting down any hardware or taking any locks
 * reboot the system. This is called when we know we are in
 * trouble so this is our best effort to reboot. This is
 * safe to call in interrupt context.
 */
void emergency_restart(void)
{
	kmsg_dump(KMSG_DUMP_EMERG);
	machine_emergency_restart();
}
EXPORT_SYMBOL_GPL(emergency_restart);

void kernel_restart_prepare(char *cmd)
{
	blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
	system_state = SYSTEM_RESTART;
	usermodehelper_disable();
	device_shutdown();
}

/**
 * register_reboot_notifier - Register function to be called at reboot time
 * @nb: Info about notifier function to be called
 *
 * Registers a function with the list of functions
 * to be called at reboot time.
 *
 * Currently always returns zero, as blocking_notifier_chain_register()
 * always returns zero.
 */
int register_reboot_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&reboot_notifier_list, nb);
}
EXPORT_SYMBOL(register_reboot_notifier);

/**
 * unregister_reboot_notifier - Unregister previously registered reboot notifier
 * @nb: Hook to be unregistered
 *
 * Unregisters a previously registered reboot
 * notifier function.
 *
 * Returns zero on success, or %-ENOENT on failure.
 */
int unregister_reboot_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&reboot_notifier_list, nb);
}
EXPORT_SYMBOL(unregister_reboot_notifier);

/* Add backwards compatibility for stable trees. */
#ifndef PF_NO_SETAFFINITY
#define PF_NO_SETAFFINITY	PF_THREAD_BOUND
#endif

static void migrate_to_reboot_cpu(void)
{
	/* The boot cpu is always logical cpu 0 */
	int cpu = 0;

	cpu_hotplug_disable();

	/* Make certain the cpu I'm about to reboot on is online */
	if (!cpu_online(cpu))
		cpu = cpumask_first(cpu_online_mask);

	/* Prevent races with other tasks migrating this task */
	current->flags |= PF_NO_SETAFFINITY;

	/* Make certain I only run on the appropriate processor */
	set_cpus_allowed_ptr(current, cpumask_of(cpu));
}

/**
 * kernel_restart - reboot the system
 * @cmd: pointer to buffer containing command to execute for restart
 *	 or %NULL
 *
 * Shutdown everything and perform a clean reboot.
 * This is not safe to call in interrupt context.
 */
void kernel_restart(char *cmd)
{
	kernel_restart_prepare(cmd);
	migrate_to_reboot_cpu();
	syscore_shutdown();
	if (!cmd)
		printk(KERN_EMERG "Restarting system.\n");
	else
		printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
	kmsg_dump(KMSG_DUMP_RESTART);
	machine_restart(cmd);
}
EXPORT_SYMBOL_GPL(kernel_restart);

static void kernel_shutdown_prepare(enum system_states state)
{
	blocking_notifier_call_chain(&reboot_notifier_list,
		(state == SYSTEM_HALT) ? SYS_HALT : SYS_POWER_OFF, NULL);
	system_state = state;
	usermodehelper_disable();
	device_shutdown();
}
/**
 * kernel_halt - halt the system
 *
 * Shutdown everything and perform a clean system halt.
 */
void kernel_halt(void)
{
	kernel_shutdown_prepare(SYSTEM_HALT);
	migrate_to_reboot_cpu();
	syscore_shutdown();
	printk(KERN_EMERG "System halted.\n");
	kmsg_dump(KMSG_DUMP_HALT);
	machine_halt();
}

EXPORT_SYMBOL_GPL(kernel_halt);

/**
 * kernel_power_off - power_off the system
 *
 * Shutdown everything and perform a clean system power_off.
 */
void kernel_power_off(void)
{
	kernel_shutdown_prepare(SYSTEM_POWER_OFF);
	if (pm_power_off_prepare)
		pm_power_off_prepare();
	migrate_to_reboot_cpu();
	syscore_shutdown();
	printk(KERN_EMERG "Power down.\n");
	kmsg_dump(KMSG_DUMP_POWEROFF);
	machine_power_off();
}
EXPORT_SYMBOL_GPL(kernel_power_off);

static DEFINE_MUTEX(reboot_mutex);

/*
 * Reboot system call: for obvious reasons only root may call it,
 * and even root needs to set up some magic numbers in the registers
 * so that some mistake won't make this reboot the whole machine.
 * You can also set the meaning of the ctrl-alt-del-key here.
 *
 * reboot doesn't sync: do that yourself before calling this.
 */
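/*
 * Minimal userspace sketch of the contract above (raw syscall form;
 * the glibc reboot() wrapper supplies the magic values by itself):
 *
 *	sync();		... because reboot(2) does not sync for you ...
 *	syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
 *		LINUX_REBOOT_CMD_RESTART, NULL);
 *
 * The caller needs CAP_SYS_BOOT, and without a valid magic1/magic2
 * pair the call fails with -EINVAL before doing anything.
 */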
SYSCALL_DEFINE4(reboot, int, magic1, int, magic2, unsigned int, cmd,
		void __user *, arg)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(current);
	char buffer[256];
	int ret = 0;

	/* We only trust the superuser with rebooting the system. */
	if (!ns_capable(pid_ns->user_ns, CAP_SYS_BOOT))
		return -EPERM;

	/* For safety, we require "magic" arguments. */
	if (magic1 != LINUX_REBOOT_MAGIC1 ||
	    (magic2 != LINUX_REBOOT_MAGIC2 &&
	     magic2 != LINUX_REBOOT_MAGIC2A &&
	     magic2 != LINUX_REBOOT_MAGIC2B &&
	     magic2 != LINUX_REBOOT_MAGIC2C))
		return -EINVAL;

	/*
	 * If pid namespaces are enabled and the current task is in a child
	 * pid_namespace, the command is handled by reboot_pid_ns() which will
	 * call do_exit().
	 */
	ret = reboot_pid_ns(pid_ns, cmd);
	if (ret)
		return ret;

	/* Instead of trying to make the power_off code look like
	 * halt when pm_power_off is not set do it the easy way.
	 */
	if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
		cmd = LINUX_REBOOT_CMD_HALT;

	mutex_lock(&reboot_mutex);
	switch (cmd) {
	case LINUX_REBOOT_CMD_RESTART:
		kernel_restart(NULL);
		break;

	case LINUX_REBOOT_CMD_CAD_ON:
		C_A_D = 1;
		break;

	case LINUX_REBOOT_CMD_CAD_OFF:
		C_A_D = 0;
		break;

	case LINUX_REBOOT_CMD_HALT:
		kernel_halt();
		do_exit(0);
		panic("cannot halt");

	case LINUX_REBOOT_CMD_POWER_OFF:
		kernel_power_off();
		do_exit(0);
		break;

	case LINUX_REBOOT_CMD_RESTART2:
		if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
			ret = -EFAULT;
			break;
		}
		buffer[sizeof(buffer) - 1] = '\0';

		kernel_restart(buffer);
		break;

#ifdef CONFIG_KEXEC
	case LINUX_REBOOT_CMD_KEXEC:
		ret = kernel_kexec();
		break;
#endif

#ifdef CONFIG_HIBERNATION
	case LINUX_REBOOT_CMD_SW_SUSPEND:
		ret = hibernate();
		break;
#endif

	default:
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&reboot_mutex);
	return ret;
}

static void deferred_cad(struct work_struct *dummy)
{
	kernel_restart(NULL);
}

/*
 * This function gets called by ctrl-alt-del - ie the keyboard interrupt.
 * As it's called within an interrupt, it may NOT sync: the only choice
 * is whether to reboot at once, or just ignore the ctrl-alt-del.
 */
void ctrl_alt_del(void)
{
	static DECLARE_WORK(cad_work, deferred_cad);

	if (C_A_D)
		schedule_work(&cad_work);
	else
		kill_cad_pid(SIGINT, 1);
}

/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa. (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD. A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are no races, the GIDs are checked only by filesystem
 * operations (as far as semantic preservation is concerned).
 */
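/*
 * Sketch of the privilege drop described above: a setgid program
 * whose real gid is the invoking user's can discard its effective gid
 * for good, because setting the real gid also rewrites the saved gid:
 *
 *	gid_t rgid = getgid();
 *	if (setregid(rgid, rgid) < 0)
 *		_exit(1);
 *
 * Afterwards real, effective and saved gid all equal the invoking
 * user's gid, so the program cannot switch back.
 */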
SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t krgid, kegid;

	krgid = make_kgid(ns, rgid);
	kegid = make_kgid(ns, egid);

	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
		return -EINVAL;
	if ((egid != (gid_t) -1) && !gid_valid(kegid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (rgid != (gid_t) -1) {
		if (gid_eq(old->gid, krgid) ||
		    gid_eq(old->egid, krgid) ||
		    nsown_capable(CAP_SETGID))
			new->gid = krgid;
		else
			goto error;
	}
	if (egid != (gid_t) -1) {
		if (gid_eq(old->gid, kegid) ||
		    gid_eq(old->egid, kegid) ||
		    gid_eq(old->sgid, kegid) ||
		    nsown_capable(CAP_SETGID))
			new->egid = kegid;
		else
			goto error;
	}

	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
		new->sgid = new->egid;
	new->fsgid = new->egid;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
SYSCALL_DEFINE1(setgid, gid_t, gid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t kgid;

	kgid = make_kgid(ns, gid);
	if (!gid_valid(kgid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (nsown_capable(CAP_SETGID))
		new->gid = new->egid = new->sgid = new->fsgid = kgid;
	else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
		new->egid = new->fsgid = kgid;
	else
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

/*
 * change the user struct in a credentials set to match the new UID
 */
static int set_user(struct cred *new)
{
	struct user_struct *new_user;

	new_user = alloc_uid(new->uid);
	if (!new_user)
		return -EAGAIN;

	/*
	 * We don't fail in case of NPROC limit excess here because too many
	 * poorly written programs don't check set*uid() return code, assuming
	 * it never fails if called by root. We may still enforce NPROC limit
	 * for programs doing set*uid()+execve() by harmlessly deferring the
	 * failure to the execve() stage.
	 */
	if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
	    new_user != INIT_USER)
		current->flags |= PF_NPROC_EXCEEDED;
	else
		current->flags &= ~PF_NPROC_EXCEEDED;

	free_uid(new->user);
	new->user = new_user;
	return 0;
}

/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa. (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD. A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kruid, keuid;

	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);

	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
		return -EINVAL;
	if ((euid != (uid_t) -1) && !uid_valid(keuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ruid != (uid_t) -1) {
		new->uid = kruid;
		if (!uid_eq(old->uid, kruid) &&
		    !uid_eq(old->euid, kruid) &&
		    !nsown_capable(CAP_SETUID))
			goto error;
	}

	if (euid != (uid_t) -1) {
		new->euid = keuid;
		if (!uid_eq(old->uid, keuid) &&
		    !uid_eq(old->euid, keuid) &&
		    !uid_eq(old->suid, keuid) &&
		    !nsown_capable(CAP_SETUID))
			goto error;
	}

	if (!uid_eq(new->uid, old->uid)) {
		retval = set_user(new);
		if (retval < 0)
			goto error;
	}
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && !uid_eq(keuid, old->uid)))
		new->suid = new->euid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_ID's is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too. If you don't like this, blame the bright people
 * in the POSIX committee and/or USG. Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
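/*
 * Sketch of the setreuid() swap mentioned above, for a setuid-root
 * program (per the rule documented before sys_setreuid, the saved uid
 * follows the new effective uid whenever the real uid is set):
 *
 *	uid_t ruid = getuid(), euid = geteuid();
 *	setreuid(euid, ruid);	... drop: run as the invoking user ...
 *	setreuid(ruid, euid);	... regain: the old real uid is still
 *				    root, so this is permitted ...
 *
 * A plain setuid() call would have rewritten the saved uid as well,
 * making the way back impossible.
 */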
SYSCALL_DEFINE1(setuid, uid_t, uid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kuid;

	kuid = make_kuid(ns, uid);
	if (!uid_valid(kuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (nsown_capable(CAP_SETUID)) {
		new->suid = new->uid = kuid;
		if (!uid_eq(kuid, old->uid)) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	} else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
		goto error;
	}

	new->fsuid = new->euid = kuid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}


/*
 * This function implements a generic ability to update ruid, euid,
 * and suid. This allows you to implement the 4.4 compatible seteuid().
 */
SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kruid, keuid, ksuid;

	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);
	ksuid = make_kuid(ns, suid);

	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
		return -EINVAL;

	if ((euid != (uid_t) -1) && !uid_valid(keuid))
		return -EINVAL;

	if ((suid != (uid_t) -1) && !uid_valid(ksuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;

	old = current_cred();

	retval = -EPERM;
	if (!nsown_capable(CAP_SETUID)) {
		if (ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) &&
		    !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
			goto error;
		if (euid != (uid_t) -1 && !uid_eq(keuid, old->uid) &&
		    !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
			goto error;
		if (suid != (uid_t) -1 && !uid_eq(ksuid, old->uid) &&
		    !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
			goto error;
	}

	if (ruid != (uid_t) -1) {
		new->uid = kruid;
		if (!uid_eq(kruid, old->uid)) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	}
	if (euid != (uid_t) -1)
		new->euid = keuid;
	if (suid != (uid_t) -1)
		new->suid = ksuid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp)
{
	const struct cred *cred = current_cred();
	int retval;
	uid_t ruid, euid, suid;

	ruid = from_kuid_munged(cred->user_ns, cred->uid);
	euid = from_kuid_munged(cred->user_ns, cred->euid);
	suid = from_kuid_munged(cred->user_ns, cred->suid);

	if (!(retval = put_user(ruid, ruidp)) &&
	    !(retval = put_user(euid, euidp)))
		retval = put_user(suid, suidp);

	return retval;
}

/*
 * Same as above, but for rgid, egid, sgid.
 */
SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t krgid, kegid, ksgid;

	krgid = make_kgid(ns, rgid);
	kegid = make_kgid(ns, egid);
	ksgid = make_kgid(ns, sgid);

	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
		return -EINVAL;
	if ((egid != (gid_t) -1) && !gid_valid(kegid))
		return -EINVAL;
	if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (!nsown_capable(CAP_SETGID)) {
		if (rgid != (gid_t) -1 && !gid_eq(krgid, old->gid) &&
		    !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
			goto error;
		if (egid != (gid_t) -1 && !gid_eq(kegid, old->gid) &&
		    !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
			goto error;
		if (sgid != (gid_t) -1 && !gid_eq(ksgid, old->gid) &&
		    !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
			goto error;
	}

	if (rgid != (gid_t) -1)
		new->gid = krgid;
	if (egid != (gid_t) -1)
		new->egid = kegid;
	if (sgid != (gid_t) -1)
		new->sgid = ksgid;
	new->fsgid = new->egid;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp)
{
	const struct cred *cred = current_cred();
	int retval;
	gid_t rgid, egid, sgid;

	rgid = from_kgid_munged(cred->user_ns, cred->gid);
	egid = from_kgid_munged(cred->user_ns, cred->egid);
	sgid = from_kgid_munged(cred->user_ns, cred->sgid);

	if (!(retval = put_user(rgid, rgidp)) &&
	    !(retval = put_user(egid, egidp)))
		retval = put_user(sgid, sgidp);

	return retval;
}


/*
 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 * whatever uid it wants to). It normally shadows "euid", except when
 * explicitly set by setfsuid() or for access..
 */
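/*
 * Usage sketch of the nfsd-style pattern above (client_uid is a
 * hypothetical placeholder): impersonate a client for permission
 * checks only, without changing who may signal the process:
 *
 *	uid_t prev = setfsuid(client_uid);
 *	... filesystem accesses are now checked against client_uid ...
 *	setfsuid(prev);
 *
 * Note the return value is the previous fsuid whether or not the
 * change was allowed, so a caller that must know calls it again.
 */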
SYSCALL_DEFINE1(setfsuid, uid_t, uid)
{
	const struct cred *old;
	struct cred *new;
	uid_t old_fsuid;
	kuid_t kuid;

	old = current_cred();
	old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);

	kuid = make_kuid(old->user_ns, uid);
	if (!uid_valid(kuid))
		return old_fsuid;

	new = prepare_creds();
	if (!new)
		return old_fsuid;

	if (uid_eq(kuid, old->uid) || uid_eq(kuid, old->euid) ||
	    uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
	    nsown_capable(CAP_SETUID)) {
		if (!uid_eq(kuid, old->fsuid)) {
			new->fsuid = kuid;
			if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
				goto change_okay;
		}
	}

	abort_creds(new);
	return old_fsuid;

change_okay:
	commit_creds(new);
	return old_fsuid;
}

/*
 * Samma på svenska.. ("Same thing, in Swedish" - setfsgid mirrors setfsuid)
 */
SYSCALL_DEFINE1(setfsgid, gid_t, gid)
{
	const struct cred *old;
	struct cred *new;
	gid_t old_fsgid;
	kgid_t kgid;

	old = current_cred();
	old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);

	kgid = make_kgid(old->user_ns, gid);
	if (!gid_valid(kgid))
		return old_fsgid;

	new = prepare_creds();
	if (!new)
		return old_fsgid;

	if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
	    gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
	    nsown_capable(CAP_SETGID)) {
		if (!gid_eq(kgid, old->fsgid)) {
			new->fsgid = kgid;
			goto change_okay;
		}
	}

	abort_creds(new);
	return old_fsgid;

change_okay:
	commit_creds(new);
	return old_fsgid;
}

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid. The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
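/*
 * Illustration of the tgid/pid distinction from userspace: in a
 * process with several threads, getpid() returns the same value (the
 * tgid) in every thread, while syscall(SYS_gettid) returns a distinct
 * value per thread and equals getpid() only in the group leader.
 */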
SYSCALL_DEFINE0(getpid)
{
	return task_tgid_vnr(current);
}

/* Thread ID - the internal kernel "pid" */
SYSCALL_DEFINE0(gettid)
{
	return task_pid_vnr(current);
}

/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
SYSCALL_DEFINE0(getppid)
{
	int pid;

	rcu_read_lock();
	pid = task_tgid_vnr(rcu_dereference(current->real_parent));
	rcu_read_unlock();

	return pid;
}

SYSCALL_DEFINE0(getuid)
{
	/* Only we change this so SMP safe */
	return from_kuid_munged(current_user_ns(), current_uid());
}

SYSCALL_DEFINE0(geteuid)
{
	/* Only we change this so SMP safe */
	return from_kuid_munged(current_user_ns(), current_euid());
}

SYSCALL_DEFINE0(getgid)
{
	/* Only we change this so SMP safe */
	return from_kgid_munged(current_user_ns(), current_gid());
}

SYSCALL_DEFINE0(getegid)
{
	/* Only we change this so SMP safe */
	return from_kgid_munged(current_user_ns(), current_egid());
}

void do_sys_times(struct tms *tms)
{
	cputime_t tgutime, tgstime, cutime, cstime;

	spin_lock_irq(&current->sighand->siglock);
	thread_group_cputime_adjusted(current, &tgutime, &tgstime);
	cutime = current->signal->cutime;
	cstime = current->signal->cstime;
	spin_unlock_irq(&current->sighand->siglock);
	tms->tms_utime = cputime_to_clock_t(tgutime);
	tms->tms_stime = cputime_to_clock_t(tgstime);
	tms->tms_cutime = cputime_to_clock_t(cutime);
	tms->tms_cstime = cputime_to_clock_t(cstime);
}

SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
{
	if (tbuf) {
		struct tms tmp;

		do_sys_times(&tmp);
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
}

/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another. -TYT, 12/12/91
 *
 * Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
 * LBT 04.03.94
 */
SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
{
	struct task_struct *p;
	struct task_struct *group_leader = current->group_leader;
	struct pid *pgrp;
	int err;

	if (!pid)
		pid = task_pid_vnr(group_leader);
	if (!pgid)
		pgid = pid;
	if (pgid < 0)
		return -EINVAL;
	rcu_read_lock();

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	err = -ESRCH;
	p = find_task_by_vpid(pid);
	if (!p)
		goto out;

	err = -EINVAL;
	if (!thread_group_leader(p))
		goto out;

	if (same_thread_group(p->real_parent, group_leader)) {
		err = -EPERM;
		if (task_session(p) != task_session(group_leader))
			goto out;
		err = -EACCES;
		if (p->did_exec)
			goto out;
	} else {
		err = -ESRCH;
		if (p != group_leader)
			goto out;
	}

	err = -EPERM;
	if (p->signal->leader)
		goto out;

	pgrp = task_pid(p);
	if (pgid != pid) {
		struct task_struct *g;

		pgrp = find_vpid(pgid);
		g = pid_task(pgrp, PIDTYPE_PGID);
		if (!g || task_session(g) != task_session(group_leader))
			goto out;
	}

	err = security_task_setpgid(p, pgid);
	if (err)
		goto out;

	if (task_pgrp(p) != pgrp)
		change_pid(p, PIDTYPE_PGID, pgrp);

	err = 0;
out:
	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);
	rcu_read_unlock();
	return err;
}

SYSCALL_DEFINE1(getpgid, pid_t, pid)
{
	struct task_struct *p;
	struct pid *grp;
	int retval;

	rcu_read_lock();
	if (!pid)
		grp = task_pgrp(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		grp = task_pgrp(p);
		if (!grp)
			goto out;

		retval = security_task_getpgid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(grp);
out:
	rcu_read_unlock();
	return retval;
}

#ifdef __ARCH_WANT_SYS_GETPGRP

SYSCALL_DEFINE0(getpgrp)
{
	return sys_getpgid(0);
}

#endif

SYSCALL_DEFINE1(getsid, pid_t, pid)
{
	struct task_struct *p;
	struct pid *sid;
	int retval;

	rcu_read_lock();
	if (!pid)
		sid = task_session(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		sid = task_session(p);
		if (!sid)
			goto out;

		retval = security_task_getsid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(sid);
out:
	rcu_read_unlock();
	return retval;
}

SYSCALL_DEFINE0(setsid)
{
	struct task_struct *group_leader = current->group_leader;
	struct pid *sid = task_pid(group_leader);
	pid_t session = pid_vnr(sid);
	int err = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Fail if I am already a session leader */
	if (group_leader->signal->leader)
		goto out;

	/* Fail if a process group id already exists that equals the
	 * proposed session id.
	 */
	if (pid_task(sid, PIDTYPE_PGID))
		goto out;

	group_leader->signal->leader = 1;
	__set_special_pids(sid);

	proc_clear_tty(group_leader);

	err = session;
out:
	write_unlock_irq(&tasklist_lock);
	if (err > 0) {
		proc_sid_connector(group_leader);
		sched_autogroup_create_attach(group_leader);
	}
	return err;
}

DECLARE_RWSEM(uts_sem);

#ifdef COMPAT_UTS_MACHINE
#define override_architecture(name) \
	(personality(current->personality) == PER_LINUX32 && \
	 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
		      sizeof(COMPAT_UTS_MACHINE)))
#else
#define override_architecture(name)	0
#endif

/*
 * Work around broken programs that cannot handle "Linux 3.0".
 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40
 */
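/*
 * Worked example of the mapping: on a "3.7.0" kernel,
 * (LINUX_VERSION_CODE >> 8) & 0xff is 7, so v = 7 + 40 = 47 and a
 * UNAME26 task reads "2.6.47"; any suffix after the numeric part
 * (e.g. "-rc2") is appended unchanged by the code below.
 */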
static int override_release(char __user *release, size_t len)
{
	int ret = 0;

	if (current->personality & UNAME26) {
		const char *rest = UTS_RELEASE;
		char buf[65] = { 0 };
		int ndots = 0;
		unsigned v;
		size_t copy;

		while (*rest) {
			if (*rest == '.' && ++ndots >= 3)
				break;
			if (!isdigit(*rest) && *rest != '.')
				break;
			rest++;
		}
		v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
		copy = clamp_t(size_t, len, 1, sizeof(buf));
		copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
		ret = copy_to_user(release, buf, copy + 1);
	}
	return ret;
}

SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
{
	int errno = 0;

	down_read(&uts_sem);
	if (copy_to_user(name, utsname(), sizeof *name))
		errno = -EFAULT;
	up_read(&uts_sem);

	if (!errno && override_release(name->release, sizeof(name->release)))
		errno = -EFAULT;
	if (!errno && override_architecture(name))
		errno = -EFAULT;
	return errno;
}

#ifdef __ARCH_WANT_SYS_OLD_UNAME
/*
 * Old cruft
 */
SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
{
	int error = 0;

	if (!name)
		return -EFAULT;

	down_read(&uts_sem);
	if (copy_to_user(name, utsname(), sizeof(*name)))
		error = -EFAULT;
	up_read(&uts_sem);

	if (!error && override_release(name->release, sizeof(name->release)))
		error = -EFAULT;
	if (!error && override_architecture(name))
		error = -EFAULT;
	return error;
}

SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
{
	int error;

	if (!name)
		return -EFAULT;
	if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
		return -EFAULT;

	down_read(&uts_sem);
	error = __copy_to_user(&name->sysname, &utsname()->sysname,
			       __OLD_UTS_LEN);
	error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->nodename, &utsname()->nodename,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->release, &utsname()->release,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->release + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->version, &utsname()->version,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->version + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->machine, &utsname()->machine,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->machine + __OLD_UTS_LEN);
	up_read(&uts_sem);

	if (!error && override_architecture(name))
		error = -EFAULT;
	if (!error && override_release(name->release, sizeof(name->release)))
		error = -EFAULT;
	return error ? -EFAULT : 0;
}
#endif

SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;
	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u = utsname();

		memcpy(u->nodename, tmp, len);
		memset(u->nodename + len, 0, sizeof(u->nodename) - len);
		errno = 0;
		uts_proc_notify(UTS_PROC_HOSTNAME);
	}
	up_write(&uts_sem);
	return errno;
}

#ifdef __ARCH_WANT_SYS_GETHOSTNAME

SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
{
	int i, errno;
	struct new_utsname *u;

	if (len < 0)
		return -EINVAL;
	down_read(&uts_sem);
	u = utsname();
	i = 1 + strlen(u->nodename);
	if (i > len)
		i = len;
	errno = 0;
	if (copy_to_user(name, u->nodename, i))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}

#endif

/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname()
 */
SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;

	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u = utsname();

		memcpy(u->domainname, tmp, len);
		memset(u->domainname + len, 0, sizeof(u->domainname) - len);
		errno = 0;
		uts_proc_notify(UTS_PROC_DOMAINNAME);
	}
	up_write(&uts_sem);
	return errno;
}

SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
	struct rlimit value;
	int ret;

	ret = do_prlimit(current, resource, NULL, &value);
	if (!ret)
		ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;

	return ret;
}

#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 * Back compatibility for getrlimit. Needed for some apps.
 */

SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
		struct rlimit __user *, rlim)
{
	struct rlimit x;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	task_lock(current->group_leader);
	x = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (x.rlim_cur > 0x7FFFFFFF)
		x.rlim_cur = 0x7FFFFFFF;
	if (x.rlim_max > 0x7FFFFFFF)
		x.rlim_max = 0x7FFFFFFF;
	return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
}

#endif

static inline bool rlim64_is_infinity(__u64 rlim64)
{
#if BITS_PER_LONG < 64
	return rlim64 >= ULONG_MAX;
#else
	return rlim64 == RLIM64_INFINITY;
#endif
}

static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
{
	if (rlim->rlim_cur == RLIM_INFINITY)
		rlim64->rlim_cur = RLIM64_INFINITY;
	else
		rlim64->rlim_cur = rlim->rlim_cur;
	if (rlim->rlim_max == RLIM_INFINITY)
		rlim64->rlim_max = RLIM64_INFINITY;
	else
		rlim64->rlim_max = rlim->rlim_max;
}

static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
{
	if (rlim64_is_infinity(rlim64->rlim_cur))
		rlim->rlim_cur = RLIM_INFINITY;
	else
		rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
	if (rlim64_is_infinity(rlim64->rlim_max))
		rlim->rlim_max = RLIM_INFINITY;
	else
		rlim->rlim_max = (unsigned long)rlim64->rlim_max;
}

/* make sure you are allowed to change @tsk limits before calling this */
int do_prlimit(struct task_struct *tsk, unsigned int resource,
		struct rlimit *new_rlim, struct rlimit *old_rlim)
{
	struct rlimit *rlim;
	int retval = 0;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	if (new_rlim) {
		if (new_rlim->rlim_cur > new_rlim->rlim_max)
			return -EINVAL;
		if (resource == RLIMIT_NOFILE &&
		    new_rlim->rlim_max > sysctl_nr_open)
			return -EPERM;
	}

	/* protect tsk->signal and tsk->sighand from disappearing */
	read_lock(&tasklist_lock);
	if (!tsk->sighand) {
		retval = -ESRCH;
		goto out;
	}

	rlim = tsk->signal->rlim + resource;
	task_lock(tsk->group_leader);
	if (new_rlim) {
		/* Keep the capable check against init_user_ns until
		   cgroups can contain all limits */
		if (new_rlim->rlim_max > rlim->rlim_max &&
		    !capable(CAP_SYS_RESOURCE))
			retval = -EPERM;
		if (!retval)
			retval = security_task_setrlimit(tsk->group_leader,
					resource, new_rlim);
		if (resource == RLIMIT_CPU && new_rlim->rlim_cur == 0) {
			/*
			 * The caller is asking for an immediate RLIMIT_CPU
			 * expiry. But we use the zero value to mean "it was
			 * never set". So let's cheat and make it one second
			 * instead
			 */
			new_rlim->rlim_cur = 1;
		}
	}
	if (!retval) {
		if (old_rlim)
			*old_rlim = *rlim;
		if (new_rlim)
			*rlim = *new_rlim;
	}
	task_unlock(tsk->group_leader);

	/*
	 * RLIMIT_CPU handling. Note that the kernel fails to return an error
	 * code if it rejected the user's attempt to set RLIMIT_CPU. This is a
	 * very long-standing error, and fixing it now risks breakage of
	 * applications, so we live with it
	 */
	if (!retval && new_rlim && resource == RLIMIT_CPU &&
	    new_rlim->rlim_cur != RLIM_INFINITY)
		update_rlimit_cpu(tsk, new_rlim->rlim_cur);
out:
	read_unlock(&tasklist_lock);
	return retval;
}

/* rcu lock must be held */
static int check_prlimit_permission(struct task_struct *task)
{
	const struct cred *cred = current_cred(), *tcred;

	if (current == task)
		return 0;

	tcred = __task_cred(task);
	if (uid_eq(cred->uid, tcred->euid) &&
	    uid_eq(cred->uid, tcred->suid) &&
	    uid_eq(cred->uid, tcred->uid) &&
	    gid_eq(cred->gid, tcred->egid) &&
	    gid_eq(cred->gid, tcred->sgid) &&
	    gid_eq(cred->gid, tcred->gid))
		return 0;
	if (ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
		return 0;

	return -EPERM;
}

SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
		const struct rlimit64 __user *, new_rlim,
		struct rlimit64 __user *, old_rlim)
{
	struct rlimit64 old64, new64;
	struct rlimit old, new;
	struct task_struct *tsk;
	int ret;

	if (new_rlim) {
		if (copy_from_user(&new64, new_rlim, sizeof(new64)))
			return -EFAULT;
		rlim64_to_rlim(&new64, &new);
	}

	rcu_read_lock();
	tsk = pid ? find_task_by_vpid(pid) : current;
	if (!tsk) {
		rcu_read_unlock();
		return -ESRCH;
	}
	ret = check_prlimit_permission(tsk);
	if (ret) {
		rcu_read_unlock();
		return ret;
	}
	get_task_struct(tsk);
	rcu_read_unlock();

	ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
			old_rlim ? &old : NULL);

	if (!ret && old_rlim) {
		rlim_to_rlim64(&old, &old64);
		if (copy_to_user(old_rlim, &old64, sizeof(old64)))
			ret = -EFAULT;
	}

	put_task_struct(tsk);
	return ret;
}

SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
	struct rlimit new_rlim;

	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
		return -EFAULT;
	return do_prlimit(current, resource, &new_rlim, NULL);
}

/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*. After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this. It will make moving the rest of the information
 * a lot simpler! (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters. But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums. We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 *
 * Locking:
 * We need to take the siglock for CHILDREN, SELF and BOTH
 * for the cases current multithreaded, non-current single threaded
 * non-current multithreaded. Thread traversal is now safe with
 * the siglock held.
 * Strictly speaking, we do not need to take the siglock if we are current and
 * single threaded, as no one else can take our signal_struct away, no one
 * else can reap the children to update signal->c* counters, and no one else
 * can race with the signal-> fields. If we do not take any lock, the
 * signal-> fields could be read out of order while another thread was just
 * exiting. So we should place a read memory barrier when we avoid the lock.
 * On the writer side, write memory barrier is implied in __exit_signal
 * as __exit_signal releases the siglock spinlock after updating the signal->
 * fields. But we don't do this yet to keep things simple.
 *
 */

static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
{
	r->ru_nvcsw += t->nvcsw;
	r->ru_nivcsw += t->nivcsw;
	r->ru_minflt += t->min_flt;
	r->ru_majflt += t->maj_flt;
	r->ru_inblock += task_io_get_inblock(t);
	r->ru_oublock += task_io_get_oublock(t);
}

static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	cputime_t tgutime, tgstime, utime, stime;
	unsigned long maxrss = 0;

	memset((char *) r, 0, sizeof *r);
	utime = stime = 0;

	if (who == RUSAGE_THREAD) {
		task_cputime_adjusted(current, &utime, &stime);
		accumulate_thread_rusage(p, r);
		maxrss = p->signal->maxrss;
		goto out;
	}

	if (!lock_task_sighand(p, &flags))
		return;

	switch (who) {
	case RUSAGE_BOTH:
	case RUSAGE_CHILDREN:
		utime = p->signal->cutime;
		stime = p->signal->cstime;
		r->ru_nvcsw = p->signal->cnvcsw;
		r->ru_nivcsw = p->signal->cnivcsw;
		r->ru_minflt = p->signal->cmin_flt;
		r->ru_majflt = p->signal->cmaj_flt;
		r->ru_inblock = p->signal->cinblock;
		r->ru_oublock = p->signal->coublock;
		maxrss = p->signal->cmaxrss;

		if (who == RUSAGE_CHILDREN)
			break;

	case RUSAGE_SELF:
		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
		utime += tgutime;
		stime += tgstime;
		r->ru_nvcsw += p->signal->nvcsw;
		r->ru_nivcsw += p->signal->nivcsw;
		r->ru_minflt += p->signal->min_flt;
		r->ru_majflt += p->signal->maj_flt;
		r->ru_inblock += p->signal->inblock;
		r->ru_oublock += p->signal->oublock;
		if (maxrss < p->signal->maxrss)
			maxrss = p->signal->maxrss;
		t = p;
		do {
			accumulate_thread_rusage(t, r);
			t = next_thread(t);
		} while (t != p);
		break;

	default:
		BUG();
	}
	unlock_task_sighand(p, &flags);

out:
	cputime_to_timeval(utime, &r->ru_utime);
	cputime_to_timeval(stime, &r->ru_stime);

	if (who != RUSAGE_CHILDREN) {
		struct mm_struct *mm = get_task_mm(p);

		if (mm) {
			setmax_mm_hiwater_rss(&maxrss, mm);
			mmput(mm);
		}
	}
	r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
}

int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
{
	struct rusage r;

	k_getrusage(p, who, &r);
	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
{
	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
	    who != RUSAGE_THREAD)
		return -EINVAL;
	return getrusage(current, who, ru);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(getrusage, int, who, struct compat_rusage __user *, ru)
{
	struct rusage r;

	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
	    who != RUSAGE_THREAD)
		return -EINVAL;

	k_getrusage(current, who, &r);
	return put_compat_rusage(&r, ru);
}
#endif

SYSCALL_DEFINE1(umask, int, mask)
{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
	return mask;
}
1906
prctl_set_mm_exe_file(struct mm_struct * mm,unsigned int fd)1907 static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
1908 {
1909 struct fd exe;
1910 struct inode *inode;
1911 int err;
1912
1913 exe = fdget(fd);
1914 if (!exe.file)
1915 return -EBADF;
1916
1917 inode = file_inode(exe.file);
1918
1919 /*
1920 * Because the original mm->exe_file points to executable file, make
1921 * sure that this one is executable as well, to avoid breaking an
1922 * overall picture.
1923 */
1924 err = -EACCES;
1925 if (!S_ISREG(inode->i_mode) ||
1926 exe.file->f_path.mnt->mnt_flags & MNT_NOEXEC)
1927 goto exit;
1928
1929 err = inode_permission(inode, MAY_EXEC);
1930 if (err)
1931 goto exit;
1932
1933 down_write(&mm->mmap_sem);
1934
1935 /*
1936 * Forbid mm->exe_file change if old file still mapped.
1937 */
1938 err = -EBUSY;
1939 if (mm->exe_file) {
1940 struct vm_area_struct *vma;
1941
1942 for (vma = mm->mmap; vma; vma = vma->vm_next)
1943 if (vma->vm_file &&
1944 path_equal(&vma->vm_file->f_path,
1945 &mm->exe_file->f_path))
1946 goto exit_unlock;
1947 }
1948
1949 /*
1950 * The symlink can be changed only once, just to disallow arbitrary
1951 * transitions malicious software might bring in. This means one
1952 * could make a snapshot over all processes running and monitor
1953 * /proc/pid/exe changes to notice unusual activity if needed.
1954 */
1955 err = -EPERM;
1956 if (test_and_set_bit(MMF_EXE_FILE_CHANGED, &mm->flags))
1957 goto exit_unlock;
1958
1959 err = 0;
1960 set_mm_exe_file(mm, exe.file); /* this grabs a reference to exe.file */
1961 exit_unlock:
1962 up_write(&mm->mmap_sem);
1963
1964 exit:
1965 fdput(exe);
1966 return err;
1967 }

static int prctl_set_mm(int opt, unsigned long addr,
			unsigned long arg4, unsigned long arg5)
{
	unsigned long rlim = rlimit(RLIMIT_DATA);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int error;

	if (arg5 || (arg4 && opt != PR_SET_MM_AUXV))
		return -EINVAL;

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	if (opt == PR_SET_MM_EXE_FILE)
		return prctl_set_mm_exe_file(mm, (unsigned int)addr);

	if (addr >= TASK_SIZE || addr < mmap_min_addr)
		return -EINVAL;

	error = -EINVAL;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, addr);

	switch (opt) {
	case PR_SET_MM_START_CODE:
		mm->start_code = addr;
		break;
	case PR_SET_MM_END_CODE:
		mm->end_code = addr;
		break;
	case PR_SET_MM_START_DATA:
		mm->start_data = addr;
		break;
	case PR_SET_MM_END_DATA:
		mm->end_data = addr;
		break;

	case PR_SET_MM_START_BRK:
		if (addr <= mm->end_data)
			goto out;

		if (rlim < RLIM_INFINITY &&
		    (mm->brk - addr) +
		    (mm->end_data - mm->start_data) > rlim)
			goto out;

		mm->start_brk = addr;
		break;

	case PR_SET_MM_BRK:
		if (addr <= mm->end_data)
			goto out;

		if (rlim < RLIM_INFINITY &&
		    (addr - mm->start_brk) +
		    (mm->end_data - mm->start_data) > rlim)
			goto out;

		mm->brk = addr;
		break;

	/*
	 * If command line arguments and environment
	 * are placed somewhere else on the stack, we
	 * can set them up here: ARG_START/END to set
	 * up the command line arguments and
	 * ENV_START/END for the environment.
	 */
	case PR_SET_MM_START_STACK:
	case PR_SET_MM_ARG_START:
	case PR_SET_MM_ARG_END:
	case PR_SET_MM_ENV_START:
	case PR_SET_MM_ENV_END:
		if (!vma) {
			error = -EFAULT;
			goto out;
		}
		if (opt == PR_SET_MM_START_STACK)
			mm->start_stack = addr;
		else if (opt == PR_SET_MM_ARG_START)
			mm->arg_start = addr;
		else if (opt == PR_SET_MM_ARG_END)
			mm->arg_end = addr;
		else if (opt == PR_SET_MM_ENV_START)
			mm->env_start = addr;
		else if (opt == PR_SET_MM_ENV_END)
			mm->env_end = addr;
		break;

	/*
	 * This doesn't move the auxiliary vector itself
	 * since it's pinned to mm_struct, but it allows
	 * filling the vector with new values. It's up
	 * to the caller to provide sane values here,
	 * otherwise user-space tools which use this
	 * vector might be unhappy.
	 */
	case PR_SET_MM_AUXV: {
		unsigned long user_auxv[AT_VECTOR_SIZE];

		if (arg4 > sizeof(user_auxv))
			goto out;
		up_read(&mm->mmap_sem);

		if (copy_from_user(user_auxv, (const void __user *)addr, arg4))
			return -EFAULT;

		/* Make sure the last entry is always AT_NULL */
		user_auxv[AT_VECTOR_SIZE - 2] = 0;
		user_auxv[AT_VECTOR_SIZE - 1] = 0;

		BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));

		task_lock(current);
		memcpy(mm->saved_auxv, user_auxv, arg4);
		task_unlock(current);

		return 0;
	}
	default:
		goto out;
	}

	error = 0;
out:
	up_read(&mm->mmap_sem);
	return error;
}
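
/*
 * Illustrative userspace sketch (not part of this file): moving the brk
 * pointer via PR_SET_MM_BRK, as a restorer would during checkpoint
 * restore. The caller needs CAP_SYS_RESOURCE and the RLIMIT_DATA check
 * above must hold: (new_brk - start_brk) + (end_data - start_data)
 * must not exceed the limit. restore_target_brk is a hypothetical
 * value chosen by the restorer, above end_data and below TASK_SIZE.
 *
 *	#include <sys/prctl.h>
 *
 *	unsigned long new_brk = restore_target_brk;	// hypothetical
 *	prctl(PR_SET_MM, PR_SET_MM_BRK, new_brk, 0, 0);
 */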

#ifdef CONFIG_CHECKPOINT_RESTORE
static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
{
	return put_user(me->clear_child_tid, tid_addr);
}
#else
static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
{
	return -EINVAL;
}
#endif
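
/*
 * Illustrative userspace sketch (not part of this file): reading back
 * the clear_child_tid pointer registered with set_tid_address(2). This
 * works only on kernels built with CONFIG_CHECKPOINT_RESTORE; otherwise
 * prctl() returns -EINVAL, per the stub above.
 *
 *	#include <sys/prctl.h>
 *
 *	int *tid_addr = NULL;
 *	long err = prctl(PR_GET_TID_ADDRESS, (unsigned long)&tid_addr,
 *			 0, 0, 0);
 *	// on success, tid_addr holds the previously registered pointer
 */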

#ifdef CONFIG_MMU
static int prctl_update_vma_anon_name(struct vm_area_struct *vma,
		struct vm_area_struct **prev,
		unsigned long start, unsigned long end,
		const char __user *name_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;

	if (name_addr == vma_get_anon_name(vma)) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, vma->vm_flags, vma->anon_vma,
				vma->vm_file, pgoff, vma_policy(vma),
				name_addr);
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto out;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto out;
	}

success:
	if (!vma->vm_file)
		vma->shared.anon_name = name_addr;

out:
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}

static int prctl_set_vma_anon_name(unsigned long start, unsigned long end,
				unsigned long arg)
{
	unsigned long tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - this matches the handling in madvise.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			return error;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				return error;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = prctl_update_vma_anon_name(vma, &prev, start, tmp,
				(const char __user *)arg);
		if (error)
			return error;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			return error;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_sem */
			vma = find_vma(current->mm, start);
	}
}

static int prctl_set_vma(unsigned long opt, unsigned long start,
		unsigned long len_in, unsigned long arg)
{
	struct mm_struct *mm = current->mm;
	int error;
	unsigned long len;
	unsigned long end;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/*
	 * Check to see whether len was rounded up from a small
	 * negative value to zero.
	 */
	if (len_in && !len)
		return -EINVAL;

	end = start + len;
	if (end < start)
		return -EINVAL;

	if (end == start)
		return 0;

	down_write(&mm->mmap_sem);

	switch (opt) {
	case PR_SET_VMA_ANON_NAME:
		error = prctl_set_vma_anon_name(start, end, arg);
		break;
	default:
		error = -EINVAL;
	}

	up_write(&mm->mmap_sem);

	return error;
}
#else /* CONFIG_MMU */
static int prctl_set_vma(unsigned long opt, unsigned long start,
		unsigned long len_in, unsigned long arg)
{
	return -EINVAL;
}
#endif
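
/*
 * Illustrative userspace sketch (not part of this file): naming an
 * anonymous mapping so it shows up with a label in /proc/<pid>/maps.
 * PR_SET_VMA/PR_SET_VMA_ANON_NAME are extensions carried by this tree
 * (used notably on Android) and are not in all kernels. Because only
 * the user pointer is stored (see prctl_update_vma_anon_name() above),
 * the name string must stay valid for the lifetime of the mapping.
 *
 *	#include <sys/mman.h>
 *	#include <sys/prctl.h>
 *
 *	static const char name[] = "my-arena";	// must outlive the mapping
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (p != MAP_FAILED)
 *		prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
 *		      (unsigned long)p, 4096, (unsigned long)name);
 */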

SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
		unsigned long, arg4, unsigned long, arg5)
{
	struct task_struct *me = current;
	struct task_struct *tsk;
	unsigned char comm[sizeof(me->comm)];
	long error;

	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
	if (error != -ENOSYS)
		return error;

	error = 0;
	switch (option) {
	case PR_SET_PDEATHSIG:
		if (!valid_signal(arg2)) {
			error = -EINVAL;
			break;
		}
		me->pdeath_signal = arg2;
		break;
	case PR_GET_PDEATHSIG:
		error = put_user(me->pdeath_signal, (int __user *)arg2);
		break;
	case PR_GET_DUMPABLE:
		error = get_dumpable(me->mm);
		break;
	case PR_SET_DUMPABLE:
		if (arg2 != SUID_DUMP_DISABLE && arg2 != SUID_DUMP_USER) {
			error = -EINVAL;
			break;
		}
		set_dumpable(me->mm, arg2);
		break;

	case PR_SET_UNALIGN:
		error = SET_UNALIGN_CTL(me, arg2);
		break;
	case PR_GET_UNALIGN:
		error = GET_UNALIGN_CTL(me, arg2);
		break;
	case PR_SET_FPEMU:
		error = SET_FPEMU_CTL(me, arg2);
		break;
	case PR_GET_FPEMU:
		error = GET_FPEMU_CTL(me, arg2);
		break;
	case PR_SET_FPEXC:
		error = SET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_FPEXC:
		error = GET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_TIMING:
		error = PR_TIMING_STATISTICAL;
		break;
	case PR_SET_TIMING:
		if (arg2 != PR_TIMING_STATISTICAL)
			error = -EINVAL;
		break;
	case PR_SET_NAME:
		comm[sizeof(me->comm) - 1] = 0;
		if (strncpy_from_user(comm, (char __user *)arg2,
				      sizeof(me->comm) - 1) < 0)
			return -EFAULT;
		set_task_comm(me, comm);
		proc_comm_connector(me);
		break;
	case PR_GET_NAME:
		get_task_comm(comm, me);
		if (copy_to_user((char __user *)arg2, comm, sizeof(comm)))
			return -EFAULT;
		break;
	case PR_GET_ENDIAN:
		error = GET_ENDIAN(me, arg2);
		break;
	case PR_SET_ENDIAN:
		error = SET_ENDIAN(me, arg2);
		break;
	case PR_GET_SECCOMP:
		error = prctl_get_seccomp();
		break;
	case PR_SET_SECCOMP:
		error = prctl_set_seccomp(arg2, (char __user *)arg3);
		break;
	case PR_GET_TSC:
		error = GET_TSC_CTL(arg2);
		break;
	case PR_SET_TSC:
		error = SET_TSC_CTL(arg2);
		break;
	case PR_TASK_PERF_EVENTS_DISABLE:
		error = perf_event_task_disable();
		break;
	case PR_TASK_PERF_EVENTS_ENABLE:
		error = perf_event_task_enable();
		break;
	case PR_GET_TIMERSLACK:
		error = current->timer_slack_ns;
		break;
	case PR_SET_TIMERSLACK:
		if (arg2 <= 0)
			current->timer_slack_ns =
					current->default_timer_slack_ns;
		else
			current->timer_slack_ns = arg2;
		break;
	case PR_MCE_KILL:
		if (arg4 | arg5)
			return -EINVAL;
		switch (arg2) {
		case PR_MCE_KILL_CLEAR:
			if (arg3 != 0)
				return -EINVAL;
			current->flags &= ~PF_MCE_PROCESS;
			break;
		case PR_MCE_KILL_SET:
			current->flags |= PF_MCE_PROCESS;
			if (arg3 == PR_MCE_KILL_EARLY)
				current->flags |= PF_MCE_EARLY;
			else if (arg3 == PR_MCE_KILL_LATE)
				current->flags &= ~PF_MCE_EARLY;
			else if (arg3 == PR_MCE_KILL_DEFAULT)
				current->flags &=
						~(PF_MCE_EARLY|PF_MCE_PROCESS);
			else
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
		break;
	case PR_MCE_KILL_GET:
		if (arg2 | arg3 | arg4 | arg5)
			return -EINVAL;
		if (current->flags & PF_MCE_PROCESS)
			error = (current->flags & PF_MCE_EARLY) ?
				PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
		else
			error = PR_MCE_KILL_DEFAULT;
		break;
	case PR_SET_MM:
		error = prctl_set_mm(arg2, arg3, arg4, arg5);
		break;
	case PR_GET_TID_ADDRESS:
		error = prctl_get_tid_address(me, (int __user **)arg2);
		break;
	case PR_SET_TIMERSLACK_PID:
		if (task_pid_vnr(current) != (pid_t)arg3 &&
				!capable(CAP_SYS_NICE))
			return -EPERM;
		rcu_read_lock();
		tsk = find_task_by_vpid((pid_t)arg3);
		if (tsk == NULL) {
			rcu_read_unlock();
			return -EINVAL;
		}
		get_task_struct(tsk);
		rcu_read_unlock();
		if (arg2 <= 0)
			tsk->timer_slack_ns =
				tsk->default_timer_slack_ns;
		else
			tsk->timer_slack_ns = arg2;
		put_task_struct(tsk);
		error = 0;
		break;
	case PR_SET_CHILD_SUBREAPER:
		me->signal->is_child_subreaper = !!arg2;
		break;
	case PR_GET_CHILD_SUBREAPER:
		error = put_user(me->signal->is_child_subreaper,
				 (int __user *)arg2);
		break;
	case PR_SET_NO_NEW_PRIVS:
		if (arg2 != 1 || arg3 || arg4 || arg5)
			return -EINVAL;

		task_set_no_new_privs(current);
		break;
	case PR_GET_NO_NEW_PRIVS:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		return task_no_new_privs(current) ? 1 : 0;
	case PR_SET_FP_MODE:
		error = SET_FP_MODE(me, arg2);
		break;
	case PR_GET_FP_MODE:
		error = GET_FP_MODE(me);
		break;
	case PR_SET_VMA:
		error = prctl_set_vma(arg2, arg3, arg4, arg5);
		break;
	default:
		error = -EINVAL;
		break;
	}
	return error;
}
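
/*
 * Illustrative userspace sketch (not part of this file): the task comm
 * name handled by PR_SET_NAME/PR_GET_NAME above. Names are truncated
 * to sizeof(task_struct.comm) - 1 (15) bytes plus the NUL terminator,
 * so the read-back buffer must hold at least 16 bytes.
 *
 *	#include <stdio.h>
 *	#include <sys/prctl.h>
 *
 *	char comm[16];
 *	prctl(PR_SET_NAME, (unsigned long)"worker-thread", 0, 0, 0);
 *	if (prctl(PR_GET_NAME, (unsigned long)comm, 0, 0, 0) == 0)
 *		printf("comm: %s\n", comm);	// prints "worker-thread"
 */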

SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
		struct getcpu_cache __user *, unused)
{
	int err = 0;
	int cpu = raw_smp_processor_id();

	if (cpup)
		err |= put_user(cpu, cpup);
	if (nodep)
		err |= put_user(cpu_to_node(cpu), nodep);
	return err ? -EFAULT : 0;
}
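
/*
 * Illustrative userspace sketch (not part of this file): the third,
 * cache argument is ignored by the kernel (note the "unused" parameter
 * above) and may be NULL. Glibc also provides sched_getcpu() for the
 * common CPU-only case.
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	unsigned int cpu, node;
 *	if (syscall(SYS_getcpu, &cpu, &node, NULL) == 0)
 *		printf("running on cpu %u, node %u\n", cpu, node);
 */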

char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";

static int __orderly_poweroff(bool force)
{
	char **argv;
	static char *envp[] = {
		"HOME=/",
		"PATH=/sbin:/bin:/usr/sbin:/usr/bin",
		NULL
	};
	int ret;

	argv = argv_split(GFP_KERNEL, poweroff_cmd, NULL);
	if (argv) {
		ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
		argv_free(argv);
	} else {
		printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n",
		       __func__, poweroff_cmd);
		ret = -ENOMEM;
	}

	if (ret && force) {
		printk(KERN_WARNING "Failed to start orderly shutdown: "
		       "forcing the issue\n");
		/*
		 * I guess this should try to kick off some daemon to sync and
		 * poweroff asap. Or not even bother syncing if we're doing an
		 * emergency shutdown?
		 */
		emergency_sync();
		kernel_power_off();
	}

	return ret;
}

static bool poweroff_force;

static void poweroff_work_func(struct work_struct *work)
{
	__orderly_poweroff(poweroff_force);
}

static DECLARE_WORK(poweroff_work, poweroff_work_func);

/**
 * orderly_poweroff - Trigger an orderly system poweroff
 * @force: force poweroff if command execution fails
 *
 * This may be called from any context to trigger a system shutdown.
 * If the orderly shutdown fails and @force is set, an immediate
 * shutdown is forced.
 */
int orderly_poweroff(bool force)
{
	if (force) /* do not override the pending "true" */
		poweroff_force = true;
	schedule_work(&poweroff_work);
	return 0;
}
EXPORT_SYMBOL_GPL(orderly_poweroff);
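
/*
 * Illustrative in-kernel sketch (not part of this file): a driver
 * reacting to a critical condition, e.g. an overtemperature event,
 * where the condition and threshold names are hypothetical. Passing
 * true forces kernel_power_off() if running poweroff_cmd fails; the
 * command itself can be changed via the kernel.poweroff_cmd sysctl.
 *
 *	if (temperature > critical_threshold)	// hypothetical condition
 *		orderly_poweroff(true);
 */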

/**
 * do_sysinfo - fill in sysinfo struct
 * @info: pointer to buffer to fill
 */
static int do_sysinfo(struct sysinfo *info)
{
	unsigned long mem_total, sav_total;
	unsigned int mem_unit, bitcount;
	struct timespec tp;

	memset(info, 0, sizeof(struct sysinfo));

	ktime_get_ts(&tp);
	monotonic_to_bootbased(&tp);
	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);

	info->procs = nr_threads;

	si_meminfo(info);
	si_swapinfo(info);

	/*
	 * If the sum of all the available memory (i.e. ram + swap)
	 * is less than can be stored in a 32 bit unsigned long then
	 * we can be binary compatible with 2.2.x kernels.  If not,
	 * well, in that case 2.2.x was broken anyways...
	 *
	 *  -Erik Andersen <andersee@debian.org>
	 */

	mem_total = info->totalram + info->totalswap;
	if (mem_total < info->totalram || mem_total < info->totalswap)
		goto out;
	bitcount = 0;
	mem_unit = info->mem_unit;
	while (mem_unit > 1) {
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total)
			goto out;
	}

	/*
	 * If mem_total did not overflow, multiply all memory values by
	 * info->mem_unit and set it to 1.  This leaves things compatible
	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
	 * kernels...
	 */

	info->mem_unit = 1;
	info->totalram <<= bitcount;
	info->freeram <<= bitcount;
	info->sharedram <<= bitcount;
	info->bufferram <<= bitcount;
	info->totalswap <<= bitcount;
	info->freeswap <<= bitcount;
	info->totalhigh <<= bitcount;
	info->freehigh <<= bitcount;

out:
	return 0;
}

SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
{
	struct sysinfo val;

	do_sysinfo(&val);

	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
		return -EFAULT;

	return 0;
}
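
/*
 * Illustrative userspace sketch (not part of this file): total RAM is
 * totalram scaled by mem_unit, which do_sysinfo() above leaves at 1
 * whenever the totals fit in an unsigned long.
 *
 *	#include <stdio.h>
 *	#include <sys/sysinfo.h>
 *
 *	struct sysinfo si;
 *	if (sysinfo(&si) == 0)
 *		printf("uptime: %ld s, total ram: %llu bytes\n",
 *		       si.uptime,
 *		       (unsigned long long)si.totalram * si.mem_unit);
 */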

#ifdef CONFIG_COMPAT
struct compat_sysinfo {
	s32 uptime;
	u32 loads[3];
	u32 totalram;
	u32 freeram;
	u32 sharedram;
	u32 bufferram;
	u32 totalswap;
	u32 freeswap;
	u16 procs;
	u16 pad;
	u32 totalhigh;
	u32 freehigh;
	u32 mem_unit;
	char _f[20 - 2 * sizeof(u32) - sizeof(int)];
};

COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info)
{
	struct sysinfo s;

	do_sysinfo(&s);

	/*
	 * Check to see if any memory value is too large for 32-bit and
	 * scale down if needed.
	 */
	if ((s.totalram >> 32) || (s.totalswap >> 32)) {
		int bitcount = 0;

		while (s.mem_unit < PAGE_SIZE) {
			s.mem_unit <<= 1;
			bitcount++;
		}

		s.totalram >>= bitcount;
		s.freeram >>= bitcount;
		s.sharedram >>= bitcount;
		s.bufferram >>= bitcount;
		s.totalswap >>= bitcount;
		s.freeswap >>= bitcount;
		s.totalhigh >>= bitcount;
		s.freehigh >>= bitcount;
	}

	if (!access_ok(VERIFY_WRITE, info, sizeof(struct compat_sysinfo)) ||
	    __put_user(s.uptime, &info->uptime) ||
	    __put_user(s.loads[0], &info->loads[0]) ||
	    __put_user(s.loads[1], &info->loads[1]) ||
	    __put_user(s.loads[2], &info->loads[2]) ||
	    __put_user(s.totalram, &info->totalram) ||
	    __put_user(s.freeram, &info->freeram) ||
	    __put_user(s.sharedram, &info->sharedram) ||
	    __put_user(s.bufferram, &info->bufferram) ||
	    __put_user(s.totalswap, &info->totalswap) ||
	    __put_user(s.freeswap, &info->freeswap) ||
	    __put_user(s.procs, &info->procs) ||
	    __put_user(s.totalhigh, &info->totalhigh) ||
	    __put_user(s.freehigh, &info->freehigh) ||
	    __put_user(s.mem_unit, &info->mem_unit))
		return -EFAULT;

	return 0;
}
#endif /* CONFIG_COMPAT */