1 /*
2 * linux/kernel/sys.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 */
6
7 #include <linux/export.h>
8 #include <linux/mm.h>
9 #include <linux/utsname.h>
10 #include <linux/mman.h>
11 #include <linux/reboot.h>
12 #include <linux/prctl.h>
13 #include <linux/highuid.h>
14 #include <linux/fs.h>
15 #include <linux/kmod.h>
16 #include <linux/perf_event.h>
17 #include <linux/resource.h>
18 #include <linux/kernel.h>
19 #include <linux/workqueue.h>
20 #include <linux/capability.h>
21 #include <linux/device.h>
22 #include <linux/key.h>
23 #include <linux/times.h>
24 #include <linux/posix-timers.h>
25 #include <linux/security.h>
26 #include <linux/dcookies.h>
27 #include <linux/suspend.h>
28 #include <linux/tty.h>
29 #include <linux/signal.h>
30 #include <linux/cn_proc.h>
31 #include <linux/getcpu.h>
32 #include <linux/task_io_accounting_ops.h>
33 #include <linux/seccomp.h>
34 #include <linux/cpu.h>
35 #include <linux/personality.h>
36 #include <linux/ptrace.h>
37 #include <linux/fs_struct.h>
38 #include <linux/file.h>
39 #include <linux/mount.h>
40 #include <linux/gfp.h>
41 #include <linux/syscore_ops.h>
42 #include <linux/version.h>
43 #include <linux/ctype.h>
44 #include <linux/mm.h>
45 #include <linux/mempolicy.h>
46
47 #include <linux/compat.h>
48 #include <linux/syscalls.h>
49 #include <linux/kprobes.h>
50 #include <linux/user_namespace.h>
51 #include <linux/binfmts.h>
52
53 #include <linux/sched.h>
54 #include <linux/rcupdate.h>
55 #include <linux/uidgid.h>
56 #include <linux/cred.h>
57
58 #include <linux/kmsg_dump.h>
59 /* Move somewhere else to avoid recompiling? */
60 #include <generated/utsrelease.h>
61
62 #include <asm/uaccess.h>
63 #include <asm/io.h>
64 #include <asm/unistd.h>
65
66 #ifndef SET_UNALIGN_CTL
67 # define SET_UNALIGN_CTL(a, b) (-EINVAL)
68 #endif
69 #ifndef GET_UNALIGN_CTL
70 # define GET_UNALIGN_CTL(a, b) (-EINVAL)
71 #endif
72 #ifndef SET_FPEMU_CTL
73 # define SET_FPEMU_CTL(a, b) (-EINVAL)
74 #endif
75 #ifndef GET_FPEMU_CTL
76 # define GET_FPEMU_CTL(a, b) (-EINVAL)
77 #endif
78 #ifndef SET_FPEXC_CTL
79 # define SET_FPEXC_CTL(a, b) (-EINVAL)
80 #endif
81 #ifndef GET_FPEXC_CTL
82 # define GET_FPEXC_CTL(a, b) (-EINVAL)
83 #endif
84 #ifndef GET_ENDIAN
85 # define GET_ENDIAN(a, b) (-EINVAL)
86 #endif
87 #ifndef SET_ENDIAN
88 # define SET_ENDIAN(a, b) (-EINVAL)
89 #endif
90 #ifndef GET_TSC_CTL
91 # define GET_TSC_CTL(a) (-EINVAL)
92 #endif
93 #ifndef SET_TSC_CTL
94 # define SET_TSC_CTL(a) (-EINVAL)
95 #endif
96 #ifndef MPX_ENABLE_MANAGEMENT
97 # define MPX_ENABLE_MANAGEMENT() (-EINVAL)
98 #endif
99 #ifndef MPX_DISABLE_MANAGEMENT
100 # define MPX_DISABLE_MANAGEMENT() (-EINVAL)
101 #endif
102 #ifndef GET_FP_MODE
103 # define GET_FP_MODE(a) (-EINVAL)
104 #endif
105 #ifndef SET_FP_MODE
106 # define SET_FP_MODE(a,b) (-EINVAL)
107 #endif
108
109 /*
110 * this is where the system-wide overflow UID and GID are defined, for
111 * architectures that now have 32-bit UID/GID but didn't in the past
112 */
113
114 int overflowuid = DEFAULT_OVERFLOWUID;
115 int overflowgid = DEFAULT_OVERFLOWGID;
116
117 EXPORT_SYMBOL(overflowuid);
118 EXPORT_SYMBOL(overflowgid);
119
120 /*
121 * the same as above, but for filesystems which can only store a 16-bit
122 * UID and GID. As such, this is needed on all architectures
123 */
124
125 int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
126 int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;
127
128 EXPORT_SYMBOL(fs_overflowuid);
129 EXPORT_SYMBOL(fs_overflowgid);
130
131 /*
132 * Returns true if current's euid is same as p's uid or euid,
133 * or has CAP_SYS_NICE to p's user_ns.
134 *
135 * Called with rcu_read_lock, creds are safe
136 */
137 static bool set_one_prio_perm(struct task_struct *p)
138 {
139 const struct cred *cred = current_cred(), *pcred = __task_cred(p);
140
141 if (uid_eq(pcred->uid, cred->euid) ||
142 uid_eq(pcred->euid, cred->euid))
143 return true;
144 if (ns_capable(pcred->user_ns, CAP_SYS_NICE))
145 return true;
146 return false;
147 }
148
149 /*
150 * set the priority of a task
151 * - the caller must hold the RCU read lock
152 */
153 static int set_one_prio(struct task_struct *p, int niceval, int error)
154 {
155 int no_nice;
156
157 if (!set_one_prio_perm(p)) {
158 error = -EPERM;
159 goto out;
160 }
161 if (niceval < task_nice(p) && !can_nice(p, niceval)) {
162 error = -EACCES;
163 goto out;
164 }
165 no_nice = security_task_setnice(p, niceval);
166 if (no_nice) {
167 error = no_nice;
168 goto out;
169 }
170 if (error == -ESRCH)
171 error = 0;
172 set_user_nice(p, niceval);
173 out:
174 return error;
175 }
176
177 SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
178 {
179 struct task_struct *g, *p;
180 struct user_struct *user;
181 const struct cred *cred = current_cred();
182 int error = -EINVAL;
183 struct pid *pgrp;
184 kuid_t uid;
185
186 if (which > PRIO_USER || which < PRIO_PROCESS)
187 goto out;
188
189 /* normalize: avoid signed division (rounding problems) */
190 error = -ESRCH;
191 if (niceval < MIN_NICE)
192 niceval = MIN_NICE;
193 if (niceval > MAX_NICE)
194 niceval = MAX_NICE;
195
196 rcu_read_lock();
197 read_lock(&tasklist_lock);
198 switch (which) {
199 case PRIO_PROCESS:
200 if (who)
201 p = find_task_by_vpid(who);
202 else
203 p = current;
204 if (p)
205 error = set_one_prio(p, niceval, error);
206 break;
207 case PRIO_PGRP:
208 if (who)
209 pgrp = find_vpid(who);
210 else
211 pgrp = task_pgrp(current);
212 do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
213 error = set_one_prio(p, niceval, error);
214 } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
215 break;
216 case PRIO_USER:
217 uid = make_kuid(cred->user_ns, who);
218 user = cred->user;
219 if (!who)
220 uid = cred->uid;
221 else if (!uid_eq(uid, cred->uid)) {
222 user = find_user(uid);
223 if (!user)
224 goto out_unlock; /* No processes for this user */
225 }
226 do_each_thread(g, p) {
227 if (uid_eq(task_uid(p), uid) && task_pid_vnr(p))
228 error = set_one_prio(p, niceval, error);
229 } while_each_thread(g, p);
230 if (!uid_eq(uid, cred->uid))
231 free_uid(user); /* For find_user() */
232 break;
233 }
234 out_unlock:
235 read_unlock(&tasklist_lock);
236 rcu_read_unlock();
237 out:
238 return error;
239 }
240
241 /*
242 * Ugh. To avoid negative return values, "getpriority()" will
243 * not return the normal nice-value, but a negated value that
244 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
245 * to stay compatible.
246 */
247 SYSCALL_DEFINE2(getpriority, int, which, int, who)
248 {
249 struct task_struct *g, *p;
250 struct user_struct *user;
251 const struct cred *cred = current_cred();
252 long niceval, retval = -ESRCH;
253 struct pid *pgrp;
254 kuid_t uid;
255
256 if (which > PRIO_USER || which < PRIO_PROCESS)
257 return -EINVAL;
258
259 rcu_read_lock();
260 read_lock(&tasklist_lock);
261 switch (which) {
262 case PRIO_PROCESS:
263 if (who)
264 p = find_task_by_vpid(who);
265 else
266 p = current;
267 if (p) {
268 niceval = nice_to_rlimit(task_nice(p));
269 if (niceval > retval)
270 retval = niceval;
271 }
272 break;
273 case PRIO_PGRP:
274 if (who)
275 pgrp = find_vpid(who);
276 else
277 pgrp = task_pgrp(current);
278 do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
279 niceval = nice_to_rlimit(task_nice(p));
280 if (niceval > retval)
281 retval = niceval;
282 } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
283 break;
284 case PRIO_USER:
285 uid = make_kuid(cred->user_ns, who);
286 user = cred->user;
287 if (!who)
288 uid = cred->uid;
289 else if (!uid_eq(uid, cred->uid)) {
290 user = find_user(uid);
291 if (!user)
292 goto out_unlock; /* No processes for this user */
293 }
294 do_each_thread(g, p) {
295 if (uid_eq(task_uid(p), uid) && task_pid_vnr(p)) {
296 niceval = nice_to_rlimit(task_nice(p));
297 if (niceval > retval)
298 retval = niceval;
299 }
300 } while_each_thread(g, p);
301 if (!uid_eq(uid, cred->uid))
302 free_uid(user); /* for find_user() */
303 break;
304 }
305 out_unlock:
306 read_unlock(&tasklist_lock);
307 rcu_read_unlock();
308
309 return retval;
310 }
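/*
 * Illustrative userspace sketch, not part of this file: invoking the raw
 * syscall and undoing the 20-offset described above. Assumes glibc's
 * syscall(2) wrapper; the libc getpriority() wrapper already converts the
 * result back to a nice value and reports errors through errno instead.
 *
 *	#include <sys/syscall.h>
 *	#include <sys/resource.h>
 *	#include <unistd.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		long ret = syscall(SYS_getpriority, PRIO_PROCESS, 0);
 *
 *		if (ret == -1)			// real results are 1..40
 *			return 1;
 *		printf("nice = %ld\n", 20 - ret);	// 40..1 maps back to -20..19
 *		return 0;
 *	}
 */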
311
312 /*
313 * Unprivileged users may change the real gid to the effective gid
314 * or vice versa. (BSD-style)
315 *
316 * If you set the real gid at all, or set the effective gid to a value not
317 * equal to the real gid, then the saved gid is set to the new effective gid.
318 *
319 * This makes it possible for a setgid program to completely drop its
320 * privileges, which is often a useful assertion to make when you are doing
321 * a security audit over a program.
322 *
323 * The general idea is that a program which uses just setregid() will be
324 * 100% compatible with BSD. A program which uses just setgid() will be
325 * 100% compatible with POSIX with saved IDs.
326 *
327 * SMP: There are no races, the GIDs are checked only by filesystem
328 * operations (as far as semantic preservation is concerned).
329 */
330 #ifdef CONFIG_MULTIUSER
331 SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
332 {
333 struct user_namespace *ns = current_user_ns();
334 const struct cred *old;
335 struct cred *new;
336 int retval;
337 kgid_t krgid, kegid;
338
339 krgid = make_kgid(ns, rgid);
340 kegid = make_kgid(ns, egid);
341
342 if ((rgid != (gid_t) -1) && !gid_valid(krgid))
343 return -EINVAL;
344 if ((egid != (gid_t) -1) && !gid_valid(kegid))
345 return -EINVAL;
346
347 new = prepare_creds();
348 if (!new)
349 return -ENOMEM;
350 old = current_cred();
351
352 retval = -EPERM;
353 if (rgid != (gid_t) -1) {
354 if (gid_eq(old->gid, krgid) ||
355 gid_eq(old->egid, krgid) ||
356 ns_capable(old->user_ns, CAP_SETGID))
357 new->gid = krgid;
358 else
359 goto error;
360 }
361 if (egid != (gid_t) -1) {
362 if (gid_eq(old->gid, kegid) ||
363 gid_eq(old->egid, kegid) ||
364 gid_eq(old->sgid, kegid) ||
365 ns_capable(old->user_ns, CAP_SETGID))
366 new->egid = kegid;
367 else
368 goto error;
369 }
370
371 if (rgid != (gid_t) -1 ||
372 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
373 new->sgid = new->egid;
374 new->fsgid = new->egid;
375
376 return commit_creds(new);
377
378 error:
379 abort_creds(new);
380 return retval;
381 }
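/*
 * Illustrative userspace sketch (glibc wrappers assumed): a setgid program
 * permanently dropping its elevated group, relying on the rule above that
 * setting the real gid also resets the saved gid to the new effective gid.
 *
 *	#include <unistd.h>
 *	#include <stdlib.h>
 *
 *	static void drop_group(void)
 *	{
 *		gid_t rgid = getgid();		// the invoking user's real gid
 *
 *		if (setregid(rgid, rgid) != 0)	// real + effective; saved follows
 *			abort();
 *		if (getegid() != rgid)		// verify the drop actually happened
 *			abort();
 *	}
 */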
382
383 /*
384 * setgid() is implemented like SysV w/ SAVED_IDS
385 *
386 * SMP: Same implicit races as above.
387 */
388 SYSCALL_DEFINE1(setgid, gid_t, gid)
389 {
390 struct user_namespace *ns = current_user_ns();
391 const struct cred *old;
392 struct cred *new;
393 int retval;
394 kgid_t kgid;
395
396 kgid = make_kgid(ns, gid);
397 if (!gid_valid(kgid))
398 return -EINVAL;
399
400 new = prepare_creds();
401 if (!new)
402 return -ENOMEM;
403 old = current_cred();
404
405 retval = -EPERM;
406 if (ns_capable(old->user_ns, CAP_SETGID))
407 new->gid = new->egid = new->sgid = new->fsgid = kgid;
408 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
409 new->egid = new->fsgid = kgid;
410 else
411 goto error;
412
413 return commit_creds(new);
414
415 error:
416 abort_creds(new);
417 return retval;
418 }
419
420 /*
421 * change the user struct in a credentials set to match the new UID
422 */
423 static int set_user(struct cred *new)
424 {
425 struct user_struct *new_user;
426
427 new_user = alloc_uid(new->uid);
428 if (!new_user)
429 return -EAGAIN;
430
431 /*
432 * We don't fail in case of NPROC limit excess here because too many
433 * poorly written programs don't check set*uid() return code, assuming
434 * it never fails if called by root. We may still enforce NPROC limit
435 * for programs doing set*uid()+execve() by harmlessly deferring the
436 * failure to the execve() stage.
437 */
438 if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
439 new_user != INIT_USER)
440 current->flags |= PF_NPROC_EXCEEDED;
441 else
442 current->flags &= ~PF_NPROC_EXCEEDED;
443
444 free_uid(new->user);
445 new->user = new_user;
446 return 0;
447 }
448
449 /*
450 * Unprivileged users may change the real uid to the effective uid
451 * or vice versa. (BSD-style)
452 *
453 * If you set the real uid at all, or set the effective uid to a value not
454 * equal to the real uid, then the saved uid is set to the new effective uid.
455 *
456 * This makes it possible for a setuid program to completely drop its
457 * privileges, which is often a useful assertion to make when you are doing
458 * a security audit over a program.
459 *
460 * The general idea is that a program which uses just setreuid() will be
461 * 100% compatible with BSD. A program which uses just setuid() will be
462 * 100% compatible with POSIX with saved IDs.
463 */
464 SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
465 {
466 struct user_namespace *ns = current_user_ns();
467 const struct cred *old;
468 struct cred *new;
469 int retval;
470 kuid_t kruid, keuid;
471
472 kruid = make_kuid(ns, ruid);
473 keuid = make_kuid(ns, euid);
474
475 if ((ruid != (uid_t) -1) && !uid_valid(kruid))
476 return -EINVAL;
477 if ((euid != (uid_t) -1) && !uid_valid(keuid))
478 return -EINVAL;
479
480 new = prepare_creds();
481 if (!new)
482 return -ENOMEM;
483 old = current_cred();
484
485 retval = -EPERM;
486 if (ruid != (uid_t) -1) {
487 new->uid = kruid;
488 if (!uid_eq(old->uid, kruid) &&
489 !uid_eq(old->euid, kruid) &&
490 !ns_capable(old->user_ns, CAP_SETUID))
491 goto error;
492 }
493
494 if (euid != (uid_t) -1) {
495 new->euid = keuid;
496 if (!uid_eq(old->uid, keuid) &&
497 !uid_eq(old->euid, keuid) &&
498 !uid_eq(old->suid, keuid) &&
499 !ns_capable(old->user_ns, CAP_SETUID))
500 goto error;
501 }
502
503 if (!uid_eq(new->uid, old->uid)) {
504 retval = set_user(new);
505 if (retval < 0)
506 goto error;
507 }
508 if (ruid != (uid_t) -1 ||
509 (euid != (uid_t) -1 && !uid_eq(keuid, old->uid)))
510 new->suid = new->euid;
511 new->fsuid = new->euid;
512
513 retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
514 if (retval < 0)
515 goto error;
516
517 return commit_creds(new);
518
519 error:
520 abort_creds(new);
521 return retval;
522 }
523
524 /*
525 * setuid() is implemented like SysV with SAVED_IDS
526 *
527 * Note that SAVED_ID's is deficient in that a setuid root program
528 * like sendmail, for example, cannot set its uid to be a normal
529 * user and then switch back, because if you're root, setuid() sets
530 * the saved uid too. If you don't like this, blame the bright people
531 * in the POSIX committee and/or USG. Note that the BSD-style setreuid()
532 * will allow a root program to temporarily drop privileges and be able to
533 * regain them by swapping the real and effective uid.
534 */
535 SYSCALL_DEFINE1(setuid, uid_t, uid)
536 {
537 struct user_namespace *ns = current_user_ns();
538 const struct cred *old;
539 struct cred *new;
540 int retval;
541 kuid_t kuid;
542
543 kuid = make_kuid(ns, uid);
544 if (!uid_valid(kuid))
545 return -EINVAL;
546
547 new = prepare_creds();
548 if (!new)
549 return -ENOMEM;
550 old = current_cred();
551
552 retval = -EPERM;
553 if (ns_capable(old->user_ns, CAP_SETUID)) {
554 new->suid = new->uid = kuid;
555 if (!uid_eq(kuid, old->uid)) {
556 retval = set_user(new);
557 if (retval < 0)
558 goto error;
559 }
560 } else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
561 goto error;
562 }
563
564 new->fsuid = new->euid = kuid;
565
566 retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
567 if (retval < 0)
568 goto error;
569
570 return commit_creds(new);
571
572 error:
573 abort_creds(new);
574 return retval;
575 }
576
577
578 /*
579 * This function implements a generic ability to update ruid, euid,
580 * and suid. This allows you to implement the 4.4BSD-compatible seteuid().
581 */
582 SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
583 {
584 struct user_namespace *ns = current_user_ns();
585 const struct cred *old;
586 struct cred *new;
587 int retval;
588 kuid_t kruid, keuid, ksuid;
589
590 kruid = make_kuid(ns, ruid);
591 keuid = make_kuid(ns, euid);
592 ksuid = make_kuid(ns, suid);
593
594 if ((ruid != (uid_t) -1) && !uid_valid(kruid))
595 return -EINVAL;
596
597 if ((euid != (uid_t) -1) && !uid_valid(keuid))
598 return -EINVAL;
599
600 if ((suid != (uid_t) -1) && !uid_valid(ksuid))
601 return -EINVAL;
602
603 new = prepare_creds();
604 if (!new)
605 return -ENOMEM;
606
607 old = current_cred();
608
609 retval = -EPERM;
610 if (!ns_capable(old->user_ns, CAP_SETUID)) {
611 if (ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) &&
612 !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
613 goto error;
614 if (euid != (uid_t) -1 && !uid_eq(keuid, old->uid) &&
615 !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
616 goto error;
617 if (suid != (uid_t) -1 && !uid_eq(ksuid, old->uid) &&
618 !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
619 goto error;
620 }
621
622 if (ruid != (uid_t) -1) {
623 new->uid = kruid;
624 if (!uid_eq(kruid, old->uid)) {
625 retval = set_user(new);
626 if (retval < 0)
627 goto error;
628 }
629 }
630 if (euid != (uid_t) -1)
631 new->euid = keuid;
632 if (suid != (uid_t) -1)
633 new->suid = ksuid;
634 new->fsuid = new->euid;
635
636 retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
637 if (retval < 0)
638 goto error;
639
640 return commit_creds(new);
641
642 error:
643 abort_creds(new);
644 return retval;
645 }
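/*
 * Illustrative userspace sketch (glibc wrappers assumed; setresuid() needs
 * _GNU_SOURCE): using the three-ID interface above to drop privileges either
 * temporarily (the saved uid is kept, so the drop can be undone) or
 * permanently (all three IDs are set).
 *
 *	#define _GNU_SOURCE
 *	#include <unistd.h>
 *
 *	int drop_temporarily(uid_t uid)
 *	{
 *		return setresuid((uid_t)-1, uid, (uid_t)-1);	// only euid changes
 *	}
 *
 *	int drop_permanently(uid_t uid)
 *	{
 *		return setresuid(uid, uid, uid);	// no way back after this
 *	}
 */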
646
647 SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp)
648 {
649 const struct cred *cred = current_cred();
650 int retval;
651 uid_t ruid, euid, suid;
652
653 ruid = from_kuid_munged(cred->user_ns, cred->uid);
654 euid = from_kuid_munged(cred->user_ns, cred->euid);
655 suid = from_kuid_munged(cred->user_ns, cred->suid);
656
657 retval = put_user(ruid, ruidp);
658 if (!retval) {
659 retval = put_user(euid, euidp);
660 if (!retval)
661 return put_user(suid, suidp);
662 }
663 return retval;
664 }
665
666 /*
667 * Same as above, but for rgid, egid, sgid.
668 */
669 SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
670 {
671 struct user_namespace *ns = current_user_ns();
672 const struct cred *old;
673 struct cred *new;
674 int retval;
675 kgid_t krgid, kegid, ksgid;
676
677 krgid = make_kgid(ns, rgid);
678 kegid = make_kgid(ns, egid);
679 ksgid = make_kgid(ns, sgid);
680
681 if ((rgid != (gid_t) -1) && !gid_valid(krgid))
682 return -EINVAL;
683 if ((egid != (gid_t) -1) && !gid_valid(kegid))
684 return -EINVAL;
685 if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
686 return -EINVAL;
687
688 new = prepare_creds();
689 if (!new)
690 return -ENOMEM;
691 old = current_cred();
692
693 retval = -EPERM;
694 if (!ns_capable(old->user_ns, CAP_SETGID)) {
695 if (rgid != (gid_t) -1 && !gid_eq(krgid, old->gid) &&
696 !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
697 goto error;
698 if (egid != (gid_t) -1 && !gid_eq(kegid, old->gid) &&
699 !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
700 goto error;
701 if (sgid != (gid_t) -1 && !gid_eq(ksgid, old->gid) &&
702 !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
703 goto error;
704 }
705
706 if (rgid != (gid_t) -1)
707 new->gid = krgid;
708 if (egid != (gid_t) -1)
709 new->egid = kegid;
710 if (sgid != (gid_t) -1)
711 new->sgid = ksgid;
712 new->fsgid = new->egid;
713
714 return commit_creds(new);
715
716 error:
717 abort_creds(new);
718 return retval;
719 }
720
721 SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp)
722 {
723 const struct cred *cred = current_cred();
724 int retval;
725 gid_t rgid, egid, sgid;
726
727 rgid = from_kgid_munged(cred->user_ns, cred->gid);
728 egid = from_kgid_munged(cred->user_ns, cred->egid);
729 sgid = from_kgid_munged(cred->user_ns, cred->sgid);
730
731 retval = put_user(rgid, rgidp);
732 if (!retval) {
733 retval = put_user(egid, egidp);
734 if (!retval)
735 retval = put_user(sgid, sgidp);
736 }
737
738 return retval;
739 }
740
741
742 /*
743 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
744 * is used for "access()" and for the NFS daemon (letting nfsd stay at
745 * whatever uid it wants to). It normally shadows "euid", except when
746 * explicitly set by setfsuid() or for access..
747 */
748 SYSCALL_DEFINE1(setfsuid, uid_t, uid)
749 {
750 const struct cred *old;
751 struct cred *new;
752 uid_t old_fsuid;
753 kuid_t kuid;
754
755 old = current_cred();
756 old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);
757
758 kuid = make_kuid(old->user_ns, uid);
759 if (!uid_valid(kuid))
760 return old_fsuid;
761
762 new = prepare_creds();
763 if (!new)
764 return old_fsuid;
765
766 if (uid_eq(kuid, old->uid) || uid_eq(kuid, old->euid) ||
767 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
768 ns_capable(old->user_ns, CAP_SETUID)) {
769 if (!uid_eq(kuid, old->fsuid)) {
770 new->fsuid = kuid;
771 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
772 goto change_okay;
773 }
774 }
775
776 abort_creds(new);
777 return old_fsuid;
778
779 change_okay:
780 commit_creds(new);
781 return old_fsuid;
782 }
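/*
 * Illustrative userspace sketch (glibc wrapper assumed): as the code above
 * shows, setfsuid() always returns the previous fsuid, even when the change
 * is refused, so the conventional way to detect failure is a second call.
 *
 *	#include <sys/types.h>
 *	#include <sys/fsuid.h>
 *
 *	int change_fsuid(uid_t uid)
 *	{
 *		setfsuid(uid);
 *		return (uid_t)setfsuid(uid) == uid ? 0 : -1;	// re-read to verify
 *	}
 */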
783
784 /*
785 * Samma på svenska..
786 */
787 SYSCALL_DEFINE1(setfsgid, gid_t, gid)
788 {
789 const struct cred *old;
790 struct cred *new;
791 gid_t old_fsgid;
792 kgid_t kgid;
793
794 old = current_cred();
795 old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);
796
797 kgid = make_kgid(old->user_ns, gid);
798 if (!gid_valid(kgid))
799 return old_fsgid;
800
801 new = prepare_creds();
802 if (!new)
803 return old_fsgid;
804
805 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
806 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
807 ns_capable(old->user_ns, CAP_SETGID)) {
808 if (!gid_eq(kgid, old->fsgid)) {
809 new->fsgid = kgid;
810 goto change_okay;
811 }
812 }
813
814 abort_creds(new);
815 return old_fsgid;
816
817 change_okay:
818 commit_creds(new);
819 return old_fsgid;
820 }
821 #endif /* CONFIG_MULTIUSER */
822
823 /**
824 * sys_getpid - return the thread group id of the current process
825 *
826 * Note, despite the name, this returns the tgid not the pid. The tgid and
827 * the pid are identical unless CLONE_THREAD was specified on clone() in
828 * which case the tgid is the same in all threads of the same group.
829 *
830 * This is SMP safe as current->tgid does not change.
831 */
832 SYSCALL_DEFINE0(getpid)
833 {
834 return task_tgid_vnr(current);
835 }
836
837 /* Thread ID - the internal kernel "pid" */
838 SYSCALL_DEFINE0(gettid)
839 {
840 return task_pid_vnr(current);
841 }
842
843 /*
844 * Accessing ->real_parent is not SMP-safe, it could
845 * change from under us. However, we can use a stale
846 * value of ->real_parent under rcu_read_lock(), see
847 * release_task()->call_rcu(delayed_put_task_struct).
848 */
849 SYSCALL_DEFINE0(getppid)
850 {
851 int pid;
852
853 rcu_read_lock();
854 pid = task_tgid_vnr(rcu_dereference(current->real_parent));
855 rcu_read_unlock();
856
857 return pid;
858 }
859
860 SYSCALL_DEFINE0(getuid)
861 {
862 /* Only we change this so SMP safe */
863 return from_kuid_munged(current_user_ns(), current_uid());
864 }
865
866 SYSCALL_DEFINE0(geteuid)
867 {
868 /* Only we change this so SMP safe */
869 return from_kuid_munged(current_user_ns(), current_euid());
870 }
871
872 SYSCALL_DEFINE0(getgid)
873 {
874 /* Only we change this so SMP safe */
875 return from_kgid_munged(current_user_ns(), current_gid());
876 }
877
878 SYSCALL_DEFINE0(getegid)
879 {
880 /* Only we change this so SMP safe */
881 return from_kgid_munged(current_user_ns(), current_egid());
882 }
883
884 void do_sys_times(struct tms *tms)
885 {
886 cputime_t tgutime, tgstime, cutime, cstime;
887
888 thread_group_cputime_adjusted(current, &tgutime, &tgstime);
889 cutime = current->signal->cutime;
890 cstime = current->signal->cstime;
891 tms->tms_utime = cputime_to_clock_t(tgutime);
892 tms->tms_stime = cputime_to_clock_t(tgstime);
893 tms->tms_cutime = cputime_to_clock_t(cutime);
894 tms->tms_cstime = cputime_to_clock_t(cstime);
895 }
896
897 SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
898 {
899 if (tbuf) {
900 struct tms tmp;
901
902 do_sys_times(&tmp);
903 if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
904 return -EFAULT;
905 }
906 force_successful_syscall_return();
907 return (long) jiffies_64_to_clock_t(get_jiffies_64());
908 }
909
910 /*
911 * This needs some heavy checking ...
912 * I just haven't the stomach for it. I also don't fully
913 * understand sessions/pgrp etc. Let somebody who does explain it.
914 *
915 * OK, I think I have the protection semantics right.... this is really
916 * only important on a multi-user system anyway, to make sure one user
917 * can't send a signal to a process owned by another. -TYT, 12/12/91
918 *
919 * !PF_FORKNOEXEC check to conform completely to POSIX.
920 */
921 SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
922 {
923 struct task_struct *p;
924 struct task_struct *group_leader = current->group_leader;
925 struct pid *pgrp;
926 int err;
927
928 if (!pid)
929 pid = task_pid_vnr(group_leader);
930 if (!pgid)
931 pgid = pid;
932 if (pgid < 0)
933 return -EINVAL;
934 rcu_read_lock();
935
936 /* From this point forward we keep holding onto the tasklist lock
937 * so that our parent does not change from under us. -DaveM
938 */
939 write_lock_irq(&tasklist_lock);
940
941 err = -ESRCH;
942 p = find_task_by_vpid(pid);
943 if (!p)
944 goto out;
945
946 err = -EINVAL;
947 if (!thread_group_leader(p))
948 goto out;
949
950 if (same_thread_group(p->real_parent, group_leader)) {
951 err = -EPERM;
952 if (task_session(p) != task_session(group_leader))
953 goto out;
954 err = -EACCES;
955 if (!(p->flags & PF_FORKNOEXEC))
956 goto out;
957 } else {
958 err = -ESRCH;
959 if (p != group_leader)
960 goto out;
961 }
962
963 err = -EPERM;
964 if (p->signal->leader)
965 goto out;
966
967 pgrp = task_pid(p);
968 if (pgid != pid) {
969 struct task_struct *g;
970
971 pgrp = find_vpid(pgid);
972 g = pid_task(pgrp, PIDTYPE_PGID);
973 if (!g || task_session(g) != task_session(group_leader))
974 goto out;
975 }
976
977 err = security_task_setpgid(p, pgid);
978 if (err)
979 goto out;
980
981 if (task_pgrp(p) != pgrp)
982 change_pid(p, PIDTYPE_PGID, pgrp);
983
984 err = 0;
985 out:
986 /* All paths lead to here, thus we are safe. -DaveM */
987 write_unlock_irq(&tasklist_lock);
988 rcu_read_unlock();
989 return err;
990 }
991
992 SYSCALL_DEFINE1(getpgid, pid_t, pid)
993 {
994 struct task_struct *p;
995 struct pid *grp;
996 int retval;
997
998 rcu_read_lock();
999 if (!pid)
1000 grp = task_pgrp(current);
1001 else {
1002 retval = -ESRCH;
1003 p = find_task_by_vpid(pid);
1004 if (!p)
1005 goto out;
1006 grp = task_pgrp(p);
1007 if (!grp)
1008 goto out;
1009
1010 retval = security_task_getpgid(p);
1011 if (retval)
1012 goto out;
1013 }
1014 retval = pid_vnr(grp);
1015 out:
1016 rcu_read_unlock();
1017 return retval;
1018 }
1019
1020 #ifdef __ARCH_WANT_SYS_GETPGRP
1021
1022 SYSCALL_DEFINE0(getpgrp)
1023 {
1024 return sys_getpgid(0);
1025 }
1026
1027 #endif
1028
1029 SYSCALL_DEFINE1(getsid, pid_t, pid)
1030 {
1031 struct task_struct *p;
1032 struct pid *sid;
1033 int retval;
1034
1035 rcu_read_lock();
1036 if (!pid)
1037 sid = task_session(current);
1038 else {
1039 retval = -ESRCH;
1040 p = find_task_by_vpid(pid);
1041 if (!p)
1042 goto out;
1043 sid = task_session(p);
1044 if (!sid)
1045 goto out;
1046
1047 retval = security_task_getsid(p);
1048 if (retval)
1049 goto out;
1050 }
1051 retval = pid_vnr(sid);
1052 out:
1053 rcu_read_unlock();
1054 return retval;
1055 }
1056
1057 static void set_special_pids(struct pid *pid)
1058 {
1059 struct task_struct *curr = current->group_leader;
1060
1061 if (task_session(curr) != pid)
1062 change_pid(curr, PIDTYPE_SID, pid);
1063
1064 if (task_pgrp(curr) != pid)
1065 change_pid(curr, PIDTYPE_PGID, pid);
1066 }
1067
1068 SYSCALL_DEFINE0(setsid)
1069 {
1070 struct task_struct *group_leader = current->group_leader;
1071 struct pid *sid = task_pid(group_leader);
1072 pid_t session = pid_vnr(sid);
1073 int err = -EPERM;
1074
1075 write_lock_irq(&tasklist_lock);
1076 /* Fail if I am already a session leader */
1077 if (group_leader->signal->leader)
1078 goto out;
1079
1080 /* Fail if a process group id already exists that equals the
1081 * proposed session id.
1082 */
1083 if (pid_task(sid, PIDTYPE_PGID))
1084 goto out;
1085
1086 group_leader->signal->leader = 1;
1087 set_special_pids(sid);
1088
1089 proc_clear_tty(group_leader);
1090
1091 err = session;
1092 out:
1093 write_unlock_irq(&tasklist_lock);
1094 if (err > 0) {
1095 proc_sid_connector(group_leader);
1096 sched_autogroup_create_attach(group_leader);
1097 }
1098 return err;
1099 }
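/*
 * Illustrative userspace sketch (glibc wrappers assumed): the classic
 * daemon-style fork before setsid(). The fork() ensures the child is not a
 * process group leader and that its fresh pid is not already in use as a
 * process group id, so the checks above succeed for it.
 *
 *	#include <sys/types.h>
 *	#include <unistd.h>
 *	#include <stdlib.h>
 *
 *	static void new_session(void)
 *	{
 *		pid_t pid = fork();
 *
 *		if (pid < 0)
 *			abort();
 *		if (pid > 0)
 *			_exit(0);		// parent goes away
 *		if (setsid() == (pid_t)-1)	// child becomes session leader
 *			abort();
 *	}
 */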
1100
1101 DECLARE_RWSEM(uts_sem);
1102
1103 #ifdef COMPAT_UTS_MACHINE
1104 #define override_architecture(name) \
1105 (personality(current->personality) == PER_LINUX32 && \
1106 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
1107 sizeof(COMPAT_UTS_MACHINE)))
1108 #else
1109 #define override_architecture(name) 0
1110 #endif
1111
1112 /*
1113 * Work around broken programs that cannot handle "Linux 3.0".
1114 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40,
1115 * and we map 4.x to 2.6.60+x, so 4.0 would be 2.6.60.
1116 */
1117 static int override_release(char __user *release, size_t len)
1118 {
1119 int ret = 0;
1120
1121 if (current->personality & UNAME26) {
1122 const char *rest = UTS_RELEASE;
1123 char buf[65] = { 0 };
1124 int ndots = 0;
1125 unsigned v;
1126 size_t copy;
1127
1128 while (*rest) {
1129 if (*rest == '.' && ++ndots >= 3)
1130 break;
1131 if (!isdigit(*rest) && *rest != '.')
1132 break;
1133 rest++;
1134 }
1135 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 60;
1136 copy = clamp_t(size_t, len, 1, sizeof(buf));
1137 copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
1138 ret = copy_to_user(release, buf, copy + 1);
1139 }
1140 return ret;
1141 }
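/*
 * Illustrative userspace sketch of the UNAME26 mapping above (assumes the
 * UAPI value of UNAME26 and glibc's personality()/uname() wrappers): on a
 * 4.4 kernel the release string is then reported as, e.g., "2.6.64-generic".
 *
 *	#include <sys/personality.h>
 *	#include <sys/utsname.h>
 *	#include <stdio.h>
 *
 *	#ifndef UNAME26
 *	#define UNAME26 0x0020000	// from <linux/personality.h>
 *	#endif
 *
 *	int main(void)
 *	{
 *		struct utsname u;
 *
 *		personality(personality(0xffffffff) | UNAME26);
 *		if (uname(&u) == 0)
 *			printf("%s\n", u.release);	// e.g. "2.6.64-generic"
 *		return 0;
 *	}
 */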
1142
1143 SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
1144 {
1145 int errno = 0;
1146
1147 down_read(&uts_sem);
1148 if (copy_to_user(name, utsname(), sizeof *name))
1149 errno = -EFAULT;
1150 up_read(&uts_sem);
1151
1152 if (!errno && override_release(name->release, sizeof(name->release)))
1153 errno = -EFAULT;
1154 if (!errno && override_architecture(name))
1155 errno = -EFAULT;
1156 return errno;
1157 }
1158
1159 #ifdef __ARCH_WANT_SYS_OLD_UNAME
1160 /*
1161 * Old cruft
1162 */
1163 SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
1164 {
1165 int error = 0;
1166
1167 if (!name)
1168 return -EFAULT;
1169
1170 down_read(&uts_sem);
1171 if (copy_to_user(name, utsname(), sizeof(*name)))
1172 error = -EFAULT;
1173 up_read(&uts_sem);
1174
1175 if (!error && override_release(name->release, sizeof(name->release)))
1176 error = -EFAULT;
1177 if (!error && override_architecture(name))
1178 error = -EFAULT;
1179 return error;
1180 }
1181
1182 SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
1183 {
1184 int error;
1185
1186 if (!name)
1187 return -EFAULT;
1188 if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
1189 return -EFAULT;
1190
1191 down_read(&uts_sem);
1192 error = __copy_to_user(&name->sysname, &utsname()->sysname,
1193 __OLD_UTS_LEN);
1194 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
1195 error |= __copy_to_user(&name->nodename, &utsname()->nodename,
1196 __OLD_UTS_LEN);
1197 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
1198 error |= __copy_to_user(&name->release, &utsname()->release,
1199 __OLD_UTS_LEN);
1200 error |= __put_user(0, name->release + __OLD_UTS_LEN);
1201 error |= __copy_to_user(&name->version, &utsname()->version,
1202 __OLD_UTS_LEN);
1203 error |= __put_user(0, name->version + __OLD_UTS_LEN);
1204 error |= __copy_to_user(&name->machine, &utsname()->machine,
1205 __OLD_UTS_LEN);
1206 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
1207 up_read(&uts_sem);
1208
1209 if (!error && override_architecture(name))
1210 error = -EFAULT;
1211 if (!error && override_release(name->release, sizeof(name->release)))
1212 error = -EFAULT;
1213 return error ? -EFAULT : 0;
1214 }
1215 #endif
1216
1217 SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
1218 {
1219 int errno;
1220 char tmp[__NEW_UTS_LEN];
1221
1222 if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
1223 return -EPERM;
1224
1225 if (len < 0 || len > __NEW_UTS_LEN)
1226 return -EINVAL;
1227 down_write(&uts_sem);
1228 errno = -EFAULT;
1229 if (!copy_from_user(tmp, name, len)) {
1230 struct new_utsname *u = utsname();
1231
1232 memcpy(u->nodename, tmp, len);
1233 memset(u->nodename + len, 0, sizeof(u->nodename) - len);
1234 errno = 0;
1235 uts_proc_notify(UTS_PROC_HOSTNAME);
1236 }
1237 up_write(&uts_sem);
1238 return errno;
1239 }
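/*
 * Illustrative userspace sketch (glibc wrapper assumed): note that, as in
 * the code above, the length is a byte count that does not include a
 * terminating NUL, and the caller needs CAP_SYS_ADMIN in the user namespace
 * owning the UTS namespace.
 *
 *	#include <unistd.h>
 *	#include <string.h>
 *
 *	int rename_host(const char *name)
 *	{
 *		return sethostname(name, strlen(name));	// at most __NEW_UTS_LEN bytes
 *	}
 */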
1240
1241 #ifdef __ARCH_WANT_SYS_GETHOSTNAME
1242
1243 SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
1244 {
1245 int i, errno;
1246 struct new_utsname *u;
1247
1248 if (len < 0)
1249 return -EINVAL;
1250 down_read(&uts_sem);
1251 u = utsname();
1252 i = 1 + strlen(u->nodename);
1253 if (i > len)
1254 i = len;
1255 errno = 0;
1256 if (copy_to_user(name, u->nodename, i))
1257 errno = -EFAULT;
1258 up_read(&uts_sem);
1259 return errno;
1260 }
1261
1262 #endif
1263
1264 /*
1265 * Only setdomainname; getdomainname can be implemented by calling
1266 * uname()
1267 */
1268 SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
1269 {
1270 int errno;
1271 char tmp[__NEW_UTS_LEN];
1272
1273 if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
1274 return -EPERM;
1275 if (len < 0 || len > __NEW_UTS_LEN)
1276 return -EINVAL;
1277
1278 down_write(&uts_sem);
1279 errno = -EFAULT;
1280 if (!copy_from_user(tmp, name, len)) {
1281 struct new_utsname *u = utsname();
1282
1283 memcpy(u->domainname, tmp, len);
1284 memset(u->domainname + len, 0, sizeof(u->domainname) - len);
1285 errno = 0;
1286 uts_proc_notify(UTS_PROC_DOMAINNAME);
1287 }
1288 up_write(&uts_sem);
1289 return errno;
1290 }
1291
1292 SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1293 {
1294 struct rlimit value;
1295 int ret;
1296
1297 ret = do_prlimit(current, resource, NULL, &value);
1298 if (!ret)
1299 ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
1300
1301 return ret;
1302 }
1303
1304 #ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT
1305
1306 /*
1307 * Back compatibility for getrlimit. Needed for some apps.
1308 */
1309 SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
1310 struct rlimit __user *, rlim)
1311 {
1312 struct rlimit x;
1313 if (resource >= RLIM_NLIMITS)
1314 return -EINVAL;
1315
1316 task_lock(current->group_leader);
1317 x = current->signal->rlim[resource];
1318 task_unlock(current->group_leader);
1319 if (x.rlim_cur > 0x7FFFFFFF)
1320 x.rlim_cur = 0x7FFFFFFF;
1321 if (x.rlim_max > 0x7FFFFFFF)
1322 x.rlim_max = 0x7FFFFFFF;
1323 return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
1324 }
1325
1326 #endif
1327
1328 static inline bool rlim64_is_infinity(__u64 rlim64)
1329 {
1330 #if BITS_PER_LONG < 64
1331 return rlim64 >= ULONG_MAX;
1332 #else
1333 return rlim64 == RLIM64_INFINITY;
1334 #endif
1335 }
1336
1337 static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
1338 {
1339 if (rlim->rlim_cur == RLIM_INFINITY)
1340 rlim64->rlim_cur = RLIM64_INFINITY;
1341 else
1342 rlim64->rlim_cur = rlim->rlim_cur;
1343 if (rlim->rlim_max == RLIM_INFINITY)
1344 rlim64->rlim_max = RLIM64_INFINITY;
1345 else
1346 rlim64->rlim_max = rlim->rlim_max;
1347 }
1348
1349 static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
1350 {
1351 if (rlim64_is_infinity(rlim64->rlim_cur))
1352 rlim->rlim_cur = RLIM_INFINITY;
1353 else
1354 rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
1355 if (rlim64_is_infinity(rlim64->rlim_max))
1356 rlim->rlim_max = RLIM_INFINITY;
1357 else
1358 rlim->rlim_max = (unsigned long)rlim64->rlim_max;
1359 }
1360
1361 /* make sure you are allowed to change @tsk limits before calling this */
1362 int do_prlimit(struct task_struct *tsk, unsigned int resource,
1363 struct rlimit *new_rlim, struct rlimit *old_rlim)
1364 {
1365 struct rlimit *rlim;
1366 int retval = 0;
1367
1368 if (resource >= RLIM_NLIMITS)
1369 return -EINVAL;
1370 if (new_rlim) {
1371 if (new_rlim->rlim_cur > new_rlim->rlim_max)
1372 return -EINVAL;
1373 if (resource == RLIMIT_NOFILE &&
1374 new_rlim->rlim_max > sysctl_nr_open)
1375 return -EPERM;
1376 }
1377
1378 /* protect tsk->signal and tsk->sighand from disappearing */
1379 read_lock(&tasklist_lock);
1380 if (!tsk->sighand) {
1381 retval = -ESRCH;
1382 goto out;
1383 }
1384
1385 rlim = tsk->signal->rlim + resource;
1386 task_lock(tsk->group_leader);
1387 if (new_rlim) {
1388 /* Keep the capable check against init_user_ns until
1389 cgroups can contain all limits */
1390 if (new_rlim->rlim_max > rlim->rlim_max &&
1391 !capable(CAP_SYS_RESOURCE))
1392 retval = -EPERM;
1393 if (!retval)
1394 retval = security_task_setrlimit(tsk->group_leader,
1395 resource, new_rlim);
1396 if (resource == RLIMIT_CPU && new_rlim->rlim_cur == 0) {
1397 /*
1398 * The caller is asking for an immediate RLIMIT_CPU
1399 * expiry. But we use the zero value to mean "it was
1400 * never set". So let's cheat and make it one second
1401 * instead
1402 */
1403 new_rlim->rlim_cur = 1;
1404 }
1405 }
1406 if (!retval) {
1407 if (old_rlim)
1408 *old_rlim = *rlim;
1409 if (new_rlim)
1410 *rlim = *new_rlim;
1411 }
1412 task_unlock(tsk->group_leader);
1413
1414 /*
1415 * RLIMIT_CPU handling. Note that the kernel fails to return an error
1416 * code if it rejected the user's attempt to set RLIMIT_CPU. This is a
1417 * very long-standing error, and fixing it now risks breakage of
1418 * applications, so we live with it
1419 */
1420 if (!retval && new_rlim && resource == RLIMIT_CPU &&
1421 new_rlim->rlim_cur != RLIM_INFINITY)
1422 update_rlimit_cpu(tsk, new_rlim->rlim_cur);
1423 out:
1424 read_unlock(&tasklist_lock);
1425 return retval;
1426 }
1427
1428 /* rcu lock must be held */
1429 static int check_prlimit_permission(struct task_struct *task)
1430 {
1431 const struct cred *cred = current_cred(), *tcred;
1432
1433 if (current == task)
1434 return 0;
1435
1436 tcred = __task_cred(task);
1437 if (uid_eq(cred->uid, tcred->euid) &&
1438 uid_eq(cred->uid, tcred->suid) &&
1439 uid_eq(cred->uid, tcred->uid) &&
1440 gid_eq(cred->gid, tcred->egid) &&
1441 gid_eq(cred->gid, tcred->sgid) &&
1442 gid_eq(cred->gid, tcred->gid))
1443 return 0;
1444 if (ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
1445 return 0;
1446
1447 return -EPERM;
1448 }
1449
1450 SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
1451 const struct rlimit64 __user *, new_rlim,
1452 struct rlimit64 __user *, old_rlim)
1453 {
1454 struct rlimit64 old64, new64;
1455 struct rlimit old, new;
1456 struct task_struct *tsk;
1457 int ret;
1458
1459 if (new_rlim) {
1460 if (copy_from_user(&new64, new_rlim, sizeof(new64)))
1461 return -EFAULT;
1462 rlim64_to_rlim(&new64, &new);
1463 }
1464
1465 rcu_read_lock();
1466 tsk = pid ? find_task_by_vpid(pid) : current;
1467 if (!tsk) {
1468 rcu_read_unlock();
1469 return -ESRCH;
1470 }
1471 ret = check_prlimit_permission(tsk);
1472 if (ret) {
1473 rcu_read_unlock();
1474 return ret;
1475 }
1476 get_task_struct(tsk);
1477 rcu_read_unlock();
1478
1479 ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
1480 old_rlim ? &old : NULL);
1481
1482 if (!ret && old_rlim) {
1483 rlim_to_rlim64(&old, &old64);
1484 if (copy_to_user(old_rlim, &old64, sizeof(old64)))
1485 ret = -EFAULT;
1486 }
1487
1488 put_task_struct(tsk);
1489 return ret;
1490 }
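/*
 * Illustrative userspace sketch (assumes glibc >= 2.13, whose prlimit()
 * wraps this syscall): reading and raising another process's RLIMIT_NOFILE
 * soft limit, subject to check_prlimit_permission() above.
 *
 *	#define _GNU_SOURCE
 *	#include <sys/types.h>
 *	#include <sys/resource.h>
 *
 *	int bump_nofile(pid_t pid, rlim_t soft)
 *	{
 *		struct rlimit old, new;
 *
 *		if (prlimit(pid, RLIMIT_NOFILE, NULL, &old) != 0)
 *			return -1;
 *		new.rlim_cur = soft;
 *		new.rlim_max = old.rlim_max;	// leave the hard limit alone
 *		return prlimit(pid, RLIMIT_NOFILE, &new, NULL);
 *	}
 */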
1491
1492 SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1493 {
1494 struct rlimit new_rlim;
1495
1496 if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
1497 return -EFAULT;
1498 return do_prlimit(current, resource, &new_rlim, NULL);
1499 }
1500
1501 /*
1502 * It would make sense to put struct rusage in the task_struct,
1503 * except that would make the task_struct be *really big*. After
1504 * task_struct gets moved into malloc'ed memory, it would
1505 * make sense to do this. It will make moving the rest of the information
1506 * a lot simpler! (Which we're not doing right now because we're not
1507 * measuring them yet).
1508 *
1509 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
1510 * races with threads incrementing their own counters. But since word
1511 * reads are atomic, we either get new values or old values and we don't
1512 * care which for the sums. We always take the siglock to protect reading
1513 * the c* fields from p->signal from races with exit.c updating those
1514 * fields when reaping, so a sample either gets all the additions of a
1515 * given child after it's reaped, or none so this sample is before reaping.
1516 *
1517 * Locking:
1518 * We need to take the siglock for CHILDREN, SELF and BOTH
1519 * for the cases: current multithreaded, non-current single-threaded, and
1520 * non-current multithreaded. Thread traversal is now safe with
1521 * the siglock held.
1522 * Strictly speaking, we do not need to take the siglock if we are current and
1523 * single threaded, as no one else can take our signal_struct away, no one
1524 * else can reap the children to update signal->c* counters, and no one else
1525 * can race with the signal-> fields. If we do not take any lock, the
1526 * signal-> fields could be read out of order while another thread was just
1527 * exiting. So we should place a read memory barrier when we avoid the lock.
1528 * On the writer side, write memory barrier is implied in __exit_signal
1529 * as __exit_signal releases the siglock spinlock after updating the signal->
1530 * fields. But we don't do this yet to keep things simple.
1531 *
1532 */
1533
1534 static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
1535 {
1536 r->ru_nvcsw += t->nvcsw;
1537 r->ru_nivcsw += t->nivcsw;
1538 r->ru_minflt += t->min_flt;
1539 r->ru_majflt += t->maj_flt;
1540 r->ru_inblock += task_io_get_inblock(t);
1541 r->ru_oublock += task_io_get_oublock(t);
1542 }
1543
1544 static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
1545 {
1546 struct task_struct *t;
1547 unsigned long flags;
1548 cputime_t tgutime, tgstime, utime, stime;
1549 unsigned long maxrss = 0;
1550
1551 memset((char *)r, 0, sizeof (*r));
1552 utime = stime = 0;
1553
1554 if (who == RUSAGE_THREAD) {
1555 task_cputime_adjusted(current, &utime, &stime);
1556 accumulate_thread_rusage(p, r);
1557 maxrss = p->signal->maxrss;
1558 goto out;
1559 }
1560
1561 if (!lock_task_sighand(p, &flags))
1562 return;
1563
1564 switch (who) {
1565 case RUSAGE_BOTH:
1566 case RUSAGE_CHILDREN:
1567 utime = p->signal->cutime;
1568 stime = p->signal->cstime;
1569 r->ru_nvcsw = p->signal->cnvcsw;
1570 r->ru_nivcsw = p->signal->cnivcsw;
1571 r->ru_minflt = p->signal->cmin_flt;
1572 r->ru_majflt = p->signal->cmaj_flt;
1573 r->ru_inblock = p->signal->cinblock;
1574 r->ru_oublock = p->signal->coublock;
1575 maxrss = p->signal->cmaxrss;
1576
1577 if (who == RUSAGE_CHILDREN)
1578 break;
1579
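/* fall through: RUSAGE_BOTH also accumulates the group's own counters */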
1580 case RUSAGE_SELF:
1581 thread_group_cputime_adjusted(p, &tgutime, &tgstime);
1582 utime += tgutime;
1583 stime += tgstime;
1584 r->ru_nvcsw += p->signal->nvcsw;
1585 r->ru_nivcsw += p->signal->nivcsw;
1586 r->ru_minflt += p->signal->min_flt;
1587 r->ru_majflt += p->signal->maj_flt;
1588 r->ru_inblock += p->signal->inblock;
1589 r->ru_oublock += p->signal->oublock;
1590 if (maxrss < p->signal->maxrss)
1591 maxrss = p->signal->maxrss;
1592 t = p;
1593 do {
1594 accumulate_thread_rusage(t, r);
1595 } while_each_thread(p, t);
1596 break;
1597
1598 default:
1599 BUG();
1600 }
1601 unlock_task_sighand(p, &flags);
1602
1603 out:
1604 cputime_to_timeval(utime, &r->ru_utime);
1605 cputime_to_timeval(stime, &r->ru_stime);
1606
1607 if (who != RUSAGE_CHILDREN) {
1608 struct mm_struct *mm = get_task_mm(p);
1609
1610 if (mm) {
1611 setmax_mm_hiwater_rss(&maxrss, mm);
1612 mmput(mm);
1613 }
1614 }
1615 r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
1616 }
1617
1618 int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
1619 {
1620 struct rusage r;
1621
1622 k_getrusage(p, who, &r);
1623 return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
1624 }
1625
1626 SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
1627 {
1628 if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
1629 who != RUSAGE_THREAD)
1630 return -EINVAL;
1631 return getrusage(current, who, ru);
1632 }
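/*
 * Illustrative userspace sketch (glibc wrapper assumed; RUSAGE_THREAD needs
 * _GNU_SOURCE): sampling only the calling thread's counters, which exercises
 * the RUSAGE_THREAD branch of k_getrusage() above.
 *
 *	#define _GNU_SOURCE
 *	#include <sys/resource.h>
 *	#include <stdio.h>
 *
 *	static void report_thread_usage(void)
 *	{
 *		struct rusage r;
 *
 *		if (getrusage(RUSAGE_THREAD, &r) == 0)
 *			printf("minflt=%ld majflt=%ld nvcsw=%ld\n",
 *			       r.ru_minflt, r.ru_majflt, r.ru_nvcsw);
 *	}
 */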
1633
1634 #ifdef CONFIG_COMPAT
1635 COMPAT_SYSCALL_DEFINE2(getrusage, int, who, struct compat_rusage __user *, ru)
1636 {
1637 struct rusage r;
1638
1639 if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
1640 who != RUSAGE_THREAD)
1641 return -EINVAL;
1642
1643 k_getrusage(current, who, &r);
1644 return put_compat_rusage(&r, ru);
1645 }
1646 #endif
1647
1648 SYSCALL_DEFINE1(umask, int, mask)
1649 {
1650 mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
1651 return mask;
1652 }
1653
1654 static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
1655 {
1656 struct fd exe;
1657 struct file *old_exe, *exe_file;
1658 struct inode *inode;
1659 int err;
1660
1661 exe = fdget(fd);
1662 if (!exe.file)
1663 return -EBADF;
1664
1665 inode = file_inode(exe.file);
1666
1667 /*
1668 * Because the original mm->exe_file points to an executable file, make
1669 * sure that this one is executable as well, to keep the overall
1670 * picture consistent.
1671 */
1672 err = -EACCES;
1673 if (!S_ISREG(inode->i_mode) || path_noexec(&exe.file->f_path))
1674 goto exit;
1675
1676 err = inode_permission(inode, MAY_EXEC);
1677 if (err)
1678 goto exit;
1679
1680 /*
1681 * Forbid mm->exe_file change if old file still mapped.
1682 */
1683 exe_file = get_mm_exe_file(mm);
1684 err = -EBUSY;
1685 if (exe_file) {
1686 struct vm_area_struct *vma;
1687
1688 down_read(&mm->mmap_sem);
1689 for (vma = mm->mmap; vma; vma = vma->vm_next) {
1690 if (!vma->vm_file)
1691 continue;
1692 if (path_equal(&vma->vm_file->f_path,
1693 &exe_file->f_path))
1694 goto exit_err;
1695 }
1696
1697 up_read(&mm->mmap_sem);
1698 fput(exe_file);
1699 }
1700
1701 /*
1702 * The symlink can be changed only once, just to disallow arbitrary
1703 * transitions malicious software might bring in. This means one
1704 * could make a snapshot over all processes running and monitor
1705 * /proc/pid/exe changes to notice unusual activity if needed.
1706 */
1707 err = -EPERM;
1708 if (test_and_set_bit(MMF_EXE_FILE_CHANGED, &mm->flags))
1709 goto exit;
1710
1711 err = 0;
1712 /* set the new file, lockless */
1713 get_file(exe.file);
1714 old_exe = xchg(&mm->exe_file, exe.file);
1715 if (old_exe)
1716 fput(old_exe);
1717 exit:
1718 fdput(exe);
1719 return err;
1720 exit_err:
1721 up_read(&mm->mmap_sem);
1722 fput(exe_file);
1723 goto exit;
1724 }
1725
1726 /*
1727 * WARNING: we don't require any capability here so be very careful
1728 * in what is allowed for modification from userspace.
1729 */
1730 static int validate_prctl_map(struct prctl_mm_map *prctl_map)
1731 {
1732 unsigned long mmap_max_addr = TASK_SIZE;
1733 struct mm_struct *mm = current->mm;
1734 int error = -EINVAL, i;
1735
1736 static const unsigned char offsets[] = {
1737 offsetof(struct prctl_mm_map, start_code),
1738 offsetof(struct prctl_mm_map, end_code),
1739 offsetof(struct prctl_mm_map, start_data),
1740 offsetof(struct prctl_mm_map, end_data),
1741 offsetof(struct prctl_mm_map, start_brk),
1742 offsetof(struct prctl_mm_map, brk),
1743 offsetof(struct prctl_mm_map, start_stack),
1744 offsetof(struct prctl_mm_map, arg_start),
1745 offsetof(struct prctl_mm_map, arg_end),
1746 offsetof(struct prctl_mm_map, env_start),
1747 offsetof(struct prctl_mm_map, env_end),
1748 };
1749
1750 /*
1751 * Make sure the members are not somewhere outside
1752 * of allowed address space.
1753 */
1754 for (i = 0; i < ARRAY_SIZE(offsets); i++) {
1755 u64 val = *(u64 *)((char *)prctl_map + offsets[i]);
1756
1757 if ((unsigned long)val >= mmap_max_addr ||
1758 (unsigned long)val < mmap_min_addr)
1759 goto out;
1760 }
1761
1762 /*
1763 * Make sure the pairs are ordered.
1764 */
1765 #define __prctl_check_order(__m1, __op, __m2) \
1766 ((unsigned long)prctl_map->__m1 __op \
1767 (unsigned long)prctl_map->__m2) ? 0 : -EINVAL
1768 error = __prctl_check_order(start_code, <, end_code);
1769 error |= __prctl_check_order(start_data, <, end_data);
1770 error |= __prctl_check_order(start_brk, <=, brk);
1771 error |= __prctl_check_order(arg_start, <=, arg_end);
1772 error |= __prctl_check_order(env_start, <=, env_end);
1773 if (error)
1774 goto out;
1775 #undef __prctl_check_order
1776
1777 error = -EINVAL;
1778
1779 /*
1780 * @brk should be after @end_data in traditional maps.
1781 */
1782 if (prctl_map->start_brk <= prctl_map->end_data ||
1783 prctl_map->brk <= prctl_map->end_data)
1784 goto out;
1785
1786 /*
1787 * Nor should we allow overriding the limits if they are set.
1788 */
1789 if (check_data_rlimit(rlimit(RLIMIT_DATA), prctl_map->brk,
1790 prctl_map->start_brk, prctl_map->end_data,
1791 prctl_map->start_data))
1792 goto out;
1793
1794 /*
1795 * Someone is trying to cheat the auxv vector.
1796 */
1797 if (prctl_map->auxv_size) {
1798 if (!prctl_map->auxv || prctl_map->auxv_size > sizeof(mm->saved_auxv))
1799 goto out;
1800 }
1801
1802 /*
1803 * Finally, make sure the caller has the rights to
1804 * change /proc/pid/exe link: only local root should
1805 * be allowed to.
1806 */
1807 if (prctl_map->exe_fd != (u32)-1) {
1808 struct user_namespace *ns = current_user_ns();
1809 const struct cred *cred = current_cred();
1810
1811 if (!uid_eq(cred->uid, make_kuid(ns, 0)) ||
1812 !gid_eq(cred->gid, make_kgid(ns, 0)))
1813 goto out;
1814 }
1815
1816 error = 0;
1817 out:
1818 return error;
1819 }
1820
1821 #ifdef CONFIG_CHECKPOINT_RESTORE
1822 static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data_size)
1823 {
1824 struct prctl_mm_map prctl_map = { .exe_fd = (u32)-1, };
1825 unsigned long user_auxv[AT_VECTOR_SIZE];
1826 struct mm_struct *mm = current->mm;
1827 int error;
1828
1829 BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
1830 BUILD_BUG_ON(sizeof(struct prctl_mm_map) > 256);
1831
1832 if (opt == PR_SET_MM_MAP_SIZE)
1833 return put_user((unsigned int)sizeof(prctl_map),
1834 (unsigned int __user *)addr);
1835
1836 if (data_size != sizeof(prctl_map))
1837 return -EINVAL;
1838
1839 if (copy_from_user(&prctl_map, addr, sizeof(prctl_map)))
1840 return -EFAULT;
1841
1842 error = validate_prctl_map(&prctl_map);
1843 if (error)
1844 return error;
1845
1846 if (prctl_map.auxv_size) {
1847 memset(user_auxv, 0, sizeof(user_auxv));
1848 if (copy_from_user(user_auxv,
1849 (const void __user *)prctl_map.auxv,
1850 prctl_map.auxv_size))
1851 return -EFAULT;
1852
1853 /* Last entry must be AT_NULL as specification requires */
1854 user_auxv[AT_VECTOR_SIZE - 2] = AT_NULL;
1855 user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL;
1856 }
1857
1858 if (prctl_map.exe_fd != (u32)-1) {
1859 error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
1860 if (error)
1861 return error;
1862 }
1863
1864 down_write(&mm->mmap_sem);
1865
1866 /*
1867 * We don't validate that these members point to real, present VMAs,
1868 * because the application may already have unmapped the corresponding
1869 * VMAs and the kernel uses these members mostly for statistics output
1870 * in procfs, except for
1871 *
1872 * - @start_brk/@brk, which are used in do_brk but for which the kernel
1873 * looks up VMAs when updating these members, so anything wrong written
1874 * here causes the kernel to complain about the userspace program but
1875 * won't lead to any problem in the kernel itself
1876 */
1877
1878 mm->start_code = prctl_map.start_code;
1879 mm->end_code = prctl_map.end_code;
1880 mm->start_data = prctl_map.start_data;
1881 mm->end_data = prctl_map.end_data;
1882 mm->start_brk = prctl_map.start_brk;
1883 mm->brk = prctl_map.brk;
1884 mm->start_stack = prctl_map.start_stack;
1885 mm->arg_start = prctl_map.arg_start;
1886 mm->arg_end = prctl_map.arg_end;
1887 mm->env_start = prctl_map.env_start;
1888 mm->env_end = prctl_map.env_end;
1889
1890 /*
1891 * Note this update of @saved_auxv is lockless, thus
1892 * if someone reads this member in procfs while we're
1893 * updating it, they may get partly updated results. It's
1894 * a known and acceptable trade-off: we leave it as is to
1895 * avoid introducing additional locks here, which would make
1896 * the kernel more complex.
1897 */
1898 if (prctl_map.auxv_size)
1899 memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));
1900
1901 up_write(&mm->mmap_sem);
1902 return 0;
1903 }
1904 #endif /* CONFIG_CHECKPOINT_RESTORE */
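/*
 * Illustrative userspace sketch (assumes a kernel built with
 * CONFIG_CHECKPOINT_RESTORE and the PR_SET_MM* constants from
 * <linux/prctl.h>): asking the kernel how large it expects struct
 * prctl_mm_map to be before issuing PR_SET_MM_MAP, as handled by
 * prctl_set_mm_map() above.
 *
 *	#include <sys/prctl.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		unsigned int size = 0;
 *
 *		if (prctl(PR_SET_MM, PR_SET_MM_MAP_SIZE, &size, 0, 0) == 0)
 *			printf("struct prctl_mm_map is %u bytes\n", size);
 *		return 0;
 *	}
 */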
1905
1906 static int prctl_set_auxv(struct mm_struct *mm, unsigned long addr,
1907 unsigned long len)
1908 {
1909 /*
1910 * This doesn't move the auxiliary vector itself since it's pinned to
1911 * mm_struct, but it permits filling the vector with new values. It's
1912 * up to the caller to provide sane values here, otherwise userspace
1913 * tools which use this vector might be unhappy.
1914 */
1915 unsigned long user_auxv[AT_VECTOR_SIZE];
1916
1917 if (len > sizeof(user_auxv))
1918 return -EINVAL;
1919
1920 if (copy_from_user(user_auxv, (const void __user *)addr, len))
1921 return -EFAULT;
1922
1923 /* Make sure the last entry is always AT_NULL */
1924 user_auxv[AT_VECTOR_SIZE - 2] = 0;
1925 user_auxv[AT_VECTOR_SIZE - 1] = 0;
1926
1927 BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
1928
1929 task_lock(current);
1930 memcpy(mm->saved_auxv, user_auxv, len);
1931 task_unlock(current);
1932
1933 return 0;
1934 }
1935
1936 static int prctl_set_mm(int opt, unsigned long addr,
1937 unsigned long arg4, unsigned long arg5)
1938 {
1939 struct mm_struct *mm = current->mm;
1940 struct prctl_mm_map prctl_map;
1941 struct vm_area_struct *vma;
1942 int error;
1943
1944 if (arg5 || (arg4 && (opt != PR_SET_MM_AUXV &&
1945 opt != PR_SET_MM_MAP &&
1946 opt != PR_SET_MM_MAP_SIZE)))
1947 return -EINVAL;
1948
1949 #ifdef CONFIG_CHECKPOINT_RESTORE
1950 if (opt == PR_SET_MM_MAP || opt == PR_SET_MM_MAP_SIZE)
1951 return prctl_set_mm_map(opt, (const void __user *)addr, arg4);
1952 #endif
1953
1954 if (!capable(CAP_SYS_RESOURCE))
1955 return -EPERM;
1956
1957 if (opt == PR_SET_MM_EXE_FILE)
1958 return prctl_set_mm_exe_file(mm, (unsigned int)addr);
1959
1960 if (opt == PR_SET_MM_AUXV)
1961 return prctl_set_auxv(mm, addr, arg4);
1962
1963 if (addr >= TASK_SIZE || addr < mmap_min_addr)
1964 return -EINVAL;
1965
1966 error = -EINVAL;
1967
1968 down_write(&mm->mmap_sem);
1969 vma = find_vma(mm, addr);
1970
1971 prctl_map.start_code = mm->start_code;
1972 prctl_map.end_code = mm->end_code;
1973 prctl_map.start_data = mm->start_data;
1974 prctl_map.end_data = mm->end_data;
1975 prctl_map.start_brk = mm->start_brk;
1976 prctl_map.brk = mm->brk;
1977 prctl_map.start_stack = mm->start_stack;
1978 prctl_map.arg_start = mm->arg_start;
1979 prctl_map.arg_end = mm->arg_end;
1980 prctl_map.env_start = mm->env_start;
1981 prctl_map.env_end = mm->env_end;
1982 prctl_map.auxv = NULL;
1983 prctl_map.auxv_size = 0;
1984 prctl_map.exe_fd = -1;
1985
1986 switch (opt) {
1987 case PR_SET_MM_START_CODE:
1988 prctl_map.start_code = addr;
1989 break;
1990 case PR_SET_MM_END_CODE:
1991 prctl_map.end_code = addr;
1992 break;
1993 case PR_SET_MM_START_DATA:
1994 prctl_map.start_data = addr;
1995 break;
1996 case PR_SET_MM_END_DATA:
1997 prctl_map.end_data = addr;
1998 break;
1999 case PR_SET_MM_START_STACK:
2000 prctl_map.start_stack = addr;
2001 break;
2002 case PR_SET_MM_START_BRK:
2003 prctl_map.start_brk = addr;
2004 break;
2005 case PR_SET_MM_BRK:
2006 prctl_map.brk = addr;
2007 break;
2008 case PR_SET_MM_ARG_START:
2009 prctl_map.arg_start = addr;
2010 break;
2011 case PR_SET_MM_ARG_END:
2012 prctl_map.arg_end = addr;
2013 break;
2014 case PR_SET_MM_ENV_START:
2015 prctl_map.env_start = addr;
2016 break;
2017 case PR_SET_MM_ENV_END:
2018 prctl_map.env_end = addr;
2019 break;
2020 default:
2021 goto out;
2022 }
2023
2024 error = validate_prctl_map(&prctl_map);
2025 if (error)
2026 goto out;
2027
2028 switch (opt) {
2029 /*
2030  * If the command line arguments and environment are placed
2031  * somewhere else on the stack, we can set them up here:
2032  * ARG_START/END for the command line arguments and
2033  * ENV_START/END for the environment (see the usage sketch
2034  * after this function).
2035  */
2036 case PR_SET_MM_START_STACK:
2037 case PR_SET_MM_ARG_START:
2038 case PR_SET_MM_ARG_END:
2039 case PR_SET_MM_ENV_START:
2040 case PR_SET_MM_ENV_END:
2041 if (!vma) {
2042 error = -EFAULT;
2043 goto out;
2044 }
2045 }
2046
2047 mm->start_code = prctl_map.start_code;
2048 mm->end_code = prctl_map.end_code;
2049 mm->start_data = prctl_map.start_data;
2050 mm->end_data = prctl_map.end_data;
2051 mm->start_brk = prctl_map.start_brk;
2052 mm->brk = prctl_map.brk;
2053 mm->start_stack = prctl_map.start_stack;
2054 mm->arg_start = prctl_map.arg_start;
2055 mm->arg_end = prctl_map.arg_end;
2056 mm->env_start = prctl_map.env_start;
2057 mm->env_end = prctl_map.env_end;
2058
2059 error = 0;
2060 out:
2061 up_write(&mm->mmap_sem);
2062 return error;
2063 }
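/*
 * Illustrative userspace sketch (not kernel code; new_args/new_args_end
 * are hypothetical addresses inside an already-mapped buffer): the
 * ARG_START/ARG_END pair mentioned in the comment above lets a
 * setproctitle()-style helper repoint the command-line window after
 * copying the argument strings somewhere else. arg4 and arg5 must be
 * zero for these options, the caller needs CAP_SYS_RESOURCE, and the
 * addresses must pass the VMA check in the switch above:
 *
 *	if (prctl(PR_SET_MM, PR_SET_MM_ARG_START,
 *		  (unsigned long)new_args, 0, 0) ||
 *	    prctl(PR_SET_MM, PR_SET_MM_ARG_END,
 *		  (unsigned long)new_args_end, 0, 0))
 *		perror("PR_SET_MM_ARG_START/END");
 */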
2064
2065 #ifdef CONFIG_CHECKPOINT_RESTORE
2066 static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
2067 {
2068 return put_user(me->clear_child_tid, tid_addr);
2069 }
2070 #else
2071 static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
2072 {
2073 return -EINVAL;
2074 }
2075 #endif
2076
2077 #ifdef CONFIG_MMU
2078 static int prctl_update_vma_anon_name(struct vm_area_struct *vma,
2079 struct vm_area_struct **prev,
2080 unsigned long start, unsigned long end,
2081 const char __user *name_addr)
2082 {
2083 struct mm_struct *mm = vma->vm_mm;
2084 int error = 0;
2085 pgoff_t pgoff;
2086
2087 if (name_addr == vma_get_anon_name(vma)) {
2088 *prev = vma;
2089 goto out;
2090 }
2091
2092 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
2093 *prev = vma_merge(mm, *prev, start, end, vma->vm_flags, vma->anon_vma,
2094 vma->vm_file, pgoff, vma_policy(vma),
2095 vma->vm_userfaultfd_ctx, name_addr);
2096 if (*prev) {
2097 vma = *prev;
2098 goto success;
2099 }
2100
2101 *prev = vma;
2102
2103 if (start != vma->vm_start) {
2104 error = split_vma(mm, vma, start, 1);
2105 if (error)
2106 goto out;
2107 }
2108
2109 if (end != vma->vm_end) {
2110 error = split_vma(mm, vma, end, 0);
2111 if (error)
2112 goto out;
2113 }
2114
2115 success:
2116 if (!vma->vm_file)
2117 vma->anon_name = name_addr;
2118
2119 out:
2120 if (error == -ENOMEM)
2121 error = -EAGAIN;
2122 return error;
2123 }
2124
2125 static int prctl_set_vma_anon_name(unsigned long start, unsigned long end,
2126 unsigned long arg)
2127 {
2128 unsigned long tmp;
2129 struct vm_area_struct *vma, *prev;
2130 int unmapped_error = 0;
2131 int error = -EINVAL;
2132
2133 /*
2134 * If the interval [start,end) covers some unmapped address
2135 * ranges, just ignore them, but return -ENOMEM at the end.
2136 * - this matches the handling in madvise.
2137 */
2138 vma = find_vma_prev(current->mm, start, &prev);
2139 if (vma && start > vma->vm_start)
2140 prev = vma;
2141
2142 for (;;) {
2143 /* Still start < end. */
2144 error = -ENOMEM;
2145 if (!vma)
2146 return error;
2147
2148 /* Here start < (end|vma->vm_end). */
2149 if (start < vma->vm_start) {
2150 unmapped_error = -ENOMEM;
2151 start = vma->vm_start;
2152 if (start >= end)
2153 return error;
2154 }
2155
2156 /* Here vma->vm_start <= start < (end|vma->vm_end) */
2157 tmp = vma->vm_end;
2158 if (end < tmp)
2159 tmp = end;
2160
2161 /* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
2162 error = prctl_update_vma_anon_name(vma, &prev, start, tmp,
2163 (const char __user *)arg);
2164 if (error)
2165 return error;
2166 start = tmp;
2167 if (prev && start < prev->vm_end)
2168 start = prev->vm_end;
2169 error = unmapped_error;
2170 if (start >= end)
2171 return error;
2172 if (prev)
2173 vma = prev->vm_next;
2174 else /* madvise_remove dropped mmap_sem */
2175 vma = find_vma(current->mm, start);
2176 }
2177 }
2178
2179 static int prctl_set_vma(unsigned long opt, unsigned long start,
2180 unsigned long len_in, unsigned long arg)
2181 {
2182 struct mm_struct *mm = current->mm;
2183 int error;
2184 unsigned long len;
2185 unsigned long end;
2186
2187 if (start & ~PAGE_MASK)
2188 return -EINVAL;
2189 len = (len_in + ~PAGE_MASK) & PAGE_MASK;
2190
2191 /* Check to see whether len was rounded up from small -ve to zero */
2192 if (len_in && !len)
2193 return -EINVAL;
2194
2195 end = start + len;
2196 if (end < start)
2197 return -EINVAL;
2198
2199 if (end == start)
2200 return 0;
2201
2202 down_write(&mm->mmap_sem);
2203
2204 switch (opt) {
2205 case PR_SET_VMA_ANON_NAME:
2206 error = prctl_set_vma_anon_name(start, end, arg);
2207 break;
2208 default:
2209 error = -EINVAL;
2210 }
2211
2212 up_write(&mm->mmap_sem);
2213
2214 return error;
2215 }
2216 #else /* CONFIG_MMU */
2217 static int prctl_set_vma(unsigned long opt, unsigned long start,
2218 unsigned long len_in, unsigned long arg)
2219 {
2220 return -EINVAL;
2221 }
2222 #endif
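/*
 * Illustrative userspace sketch (not kernel code; "len" is hypothetical):
 * tagging an anonymous mapping with PR_SET_VMA_ANON_NAME. The kernel
 * stores the user pointer itself (see prctl_update_vma_anon_name() above),
 * so the string must stay valid for the lifetime of the mapping, and only
 * VMAs without a backing file are named. Unmapped holes inside the range
 * are skipped but make the call return -ENOMEM, mirroring madvise():
 *
 *	static const char name[] = "my-anon-buffer";
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *	if (p != MAP_FAILED &&
 *	    prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
 *		  (unsigned long)p, len, (unsigned long)name))
 *		perror("PR_SET_VMA_ANON_NAME");
 */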
2223
2224 SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
2225 unsigned long, arg4, unsigned long, arg5)
2226 {
2227 struct task_struct *me = current;
2228 unsigned char comm[sizeof(me->comm)];
2229 long error;
2230
2231 error = security_task_prctl(option, arg2, arg3, arg4, arg5);
2232 if (error != -ENOSYS)
2233 return error;
2234
2235 error = 0;
2236 switch (option) {
2237 case PR_SET_PDEATHSIG:
2238 if (!valid_signal(arg2)) {
2239 error = -EINVAL;
2240 break;
2241 }
2242 me->pdeath_signal = arg2;
2243 break;
2244 case PR_GET_PDEATHSIG:
2245 error = put_user(me->pdeath_signal, (int __user *)arg2);
2246 break;
2247 case PR_GET_DUMPABLE:
2248 error = get_dumpable(me->mm);
2249 break;
2250 case PR_SET_DUMPABLE:
2251 if (arg2 != SUID_DUMP_DISABLE && arg2 != SUID_DUMP_USER) {
2252 error = -EINVAL;
2253 break;
2254 }
2255 set_dumpable(me->mm, arg2);
2256 break;
2257
2258 case PR_SET_UNALIGN:
2259 error = SET_UNALIGN_CTL(me, arg2);
2260 break;
2261 case PR_GET_UNALIGN:
2262 error = GET_UNALIGN_CTL(me, arg2);
2263 break;
2264 case PR_SET_FPEMU:
2265 error = SET_FPEMU_CTL(me, arg2);
2266 break;
2267 case PR_GET_FPEMU:
2268 error = GET_FPEMU_CTL(me, arg2);
2269 break;
2270 case PR_SET_FPEXC:
2271 error = SET_FPEXC_CTL(me, arg2);
2272 break;
2273 case PR_GET_FPEXC:
2274 error = GET_FPEXC_CTL(me, arg2);
2275 break;
2276 case PR_GET_TIMING:
2277 error = PR_TIMING_STATISTICAL;
2278 break;
2279 case PR_SET_TIMING:
2280 if (arg2 != PR_TIMING_STATISTICAL)
2281 error = -EINVAL;
2282 break;
2283 case PR_SET_NAME:
2284 comm[sizeof(me->comm) - 1] = 0;
2285 if (strncpy_from_user(comm, (char __user *)arg2,
2286 sizeof(me->comm) - 1) < 0)
2287 return -EFAULT;
2288 set_task_comm(me, comm);
2289 proc_comm_connector(me);
2290 break;
2291 case PR_GET_NAME:
2292 get_task_comm(comm, me);
2293 if (copy_to_user((char __user *)arg2, comm, sizeof(comm)))
2294 return -EFAULT;
2295 break;
2296 case PR_GET_ENDIAN:
2297 error = GET_ENDIAN(me, arg2);
2298 break;
2299 case PR_SET_ENDIAN:
2300 error = SET_ENDIAN(me, arg2);
2301 break;
2302 case PR_GET_SECCOMP:
2303 error = prctl_get_seccomp();
2304 break;
2305 case PR_SET_SECCOMP:
2306 error = prctl_set_seccomp(arg2, (char __user *)arg3);
2307 break;
2308 case PR_GET_TSC:
2309 error = GET_TSC_CTL(arg2);
2310 break;
2311 case PR_SET_TSC:
2312 error = SET_TSC_CTL(arg2);
2313 break;
2314 case PR_TASK_PERF_EVENTS_DISABLE:
2315 error = perf_event_task_disable();
2316 break;
2317 case PR_TASK_PERF_EVENTS_ENABLE:
2318 error = perf_event_task_enable();
2319 break;
2320 case PR_GET_TIMERSLACK:
2321 if (current->timer_slack_ns > ULONG_MAX)
2322 error = ULONG_MAX;
2323 else
2324 error = current->timer_slack_ns;
2325 break;
2326 case PR_SET_TIMERSLACK:
2327 if (arg2 <= 0)
2328 current->timer_slack_ns =
2329 current->default_timer_slack_ns;
2330 else
2331 current->timer_slack_ns = arg2;
2332 break;
2333 case PR_MCE_KILL:
2334 if (arg4 | arg5)
2335 return -EINVAL;
2336 switch (arg2) {
2337 case PR_MCE_KILL_CLEAR:
2338 if (arg3 != 0)
2339 return -EINVAL;
2340 current->flags &= ~PF_MCE_PROCESS;
2341 break;
2342 case PR_MCE_KILL_SET:
2343 current->flags |= PF_MCE_PROCESS;
2344 if (arg3 == PR_MCE_KILL_EARLY)
2345 current->flags |= PF_MCE_EARLY;
2346 else if (arg3 == PR_MCE_KILL_LATE)
2347 current->flags &= ~PF_MCE_EARLY;
2348 else if (arg3 == PR_MCE_KILL_DEFAULT)
2349 current->flags &=
2350 ~(PF_MCE_EARLY|PF_MCE_PROCESS);
2351 else
2352 return -EINVAL;
2353 break;
2354 default:
2355 return -EINVAL;
2356 }
2357 break;
2358 case PR_MCE_KILL_GET:
2359 if (arg2 | arg3 | arg4 | arg5)
2360 return -EINVAL;
2361 if (current->flags & PF_MCE_PROCESS)
2362 error = (current->flags & PF_MCE_EARLY) ?
2363 PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
2364 else
2365 error = PR_MCE_KILL_DEFAULT;
2366 break;
2367 case PR_SET_MM:
2368 error = prctl_set_mm(arg2, arg3, arg4, arg5);
2369 break;
2370 case PR_GET_TID_ADDRESS:
2371 error = prctl_get_tid_address(me, (int __user **)arg2);
2372 break;
2373 case PR_SET_CHILD_SUBREAPER:
2374 me->signal->is_child_subreaper = !!arg2;
2375 break;
2376 case PR_GET_CHILD_SUBREAPER:
2377 error = put_user(me->signal->is_child_subreaper,
2378 (int __user *)arg2);
2379 break;
2380 case PR_SET_NO_NEW_PRIVS:
2381 if (arg2 != 1 || arg3 || arg4 || arg5)
2382 return -EINVAL;
2383
2384 task_set_no_new_privs(current);
2385 break;
2386 case PR_GET_NO_NEW_PRIVS:
2387 if (arg2 || arg3 || arg4 || arg5)
2388 return -EINVAL;
2389 return task_no_new_privs(current) ? 1 : 0;
2390 case PR_GET_THP_DISABLE:
2391 if (arg2 || arg3 || arg4 || arg5)
2392 return -EINVAL;
2393 error = !!(me->mm->def_flags & VM_NOHUGEPAGE);
2394 break;
2395 case PR_SET_THP_DISABLE:
2396 if (arg3 || arg4 || arg5)
2397 return -EINVAL;
2398 if (down_write_killable(&me->mm->mmap_sem))
2399 return -EINTR;
2400 if (arg2)
2401 me->mm->def_flags |= VM_NOHUGEPAGE;
2402 else
2403 me->mm->def_flags &= ~VM_NOHUGEPAGE;
2404 up_write(&me->mm->mmap_sem);
2405 break;
2406 case PR_MPX_ENABLE_MANAGEMENT:
2407 if (arg2 || arg3 || arg4 || arg5)
2408 return -EINVAL;
2409 error = MPX_ENABLE_MANAGEMENT();
2410 break;
2411 case PR_MPX_DISABLE_MANAGEMENT:
2412 if (arg2 || arg3 || arg4 || arg5)
2413 return -EINVAL;
2414 error = MPX_DISABLE_MANAGEMENT();
2415 break;
2416 case PR_SET_FP_MODE:
2417 error = SET_FP_MODE(me, arg2);
2418 break;
2419 case PR_GET_FP_MODE:
2420 error = GET_FP_MODE(me);
2421 break;
2422 case PR_SET_VMA:
2423 error = prctl_set_vma(arg2, arg3, arg4, arg5);
2424 break;
2425 default:
2426 error = -EINVAL;
2427 break;
2428 }
2429 return error;
2430 }
2431
2432 SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
2433 struct getcpu_cache __user *, unused)
2434 {
2435 int err = 0;
2436 int cpu = raw_smp_processor_id();
2437
2438 if (cpup)
2439 err |= put_user(cpu, cpup);
2440 if (nodep)
2441 err |= put_user(cpu_to_node(cpu), nodep);
2442 return err ? -EFAULT : 0;
2443 }
2444
2445 /**
2446 * do_sysinfo - fill in sysinfo struct
2447 * @info: pointer to buffer to fill
2448 */
2449 static int do_sysinfo(struct sysinfo *info)
2450 {
2451 unsigned long mem_total, sav_total;
2452 unsigned int mem_unit, bitcount;
2453 struct timespec tp;
2454
2455 memset(info, 0, sizeof(struct sysinfo));
2456
2457 get_monotonic_boottime(&tp);
2458 info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
2459
2460 get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
2461
2462 info->procs = nr_threads;
2463
2464 si_meminfo(info);
2465 si_swapinfo(info);
2466
2467 /*
2468 * If the sum of all the available memory (i.e. ram + swap)
2469 * is less than can be stored in a 32 bit unsigned long then
2470 * we can be binary compatible with 2.2.x kernels. If not,
2471 * well, in that case 2.2.x was broken anyways...
2472 *
2473 * -Erik Andersen <andersee@debian.org>
2474 */
2475
2476 mem_total = info->totalram + info->totalswap;
2477 if (mem_total < info->totalram || mem_total < info->totalswap)
2478 goto out;
2479 bitcount = 0;
2480 mem_unit = info->mem_unit;
2481 while (mem_unit > 1) {
2482 bitcount++;
2483 mem_unit >>= 1;
2484 sav_total = mem_total;
2485 mem_total <<= 1;
2486 if (mem_total < sav_total)
2487 goto out;
2488 }
2489
2490 /*
2491 * If mem_total did not overflow, multiply all memory values by
2492 * info->mem_unit and set it to 1. This leaves things compatible
2493 * with 2.2.x, and also retains compatibility with earlier 2.4.x
2494 * kernels...
2495 */
2496
2497 info->mem_unit = 1;
2498 info->totalram <<= bitcount;
2499 info->freeram <<= bitcount;
2500 info->sharedram <<= bitcount;
2501 info->bufferram <<= bitcount;
2502 info->totalswap <<= bitcount;
2503 info->freeswap <<= bitcount;
2504 info->totalhigh <<= bitcount;
2505 info->freehigh <<= bitcount;
2506
2507 out:
2508 return 0;
2509 }
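/*
 * Worked example of the mem_unit normalization above (numbers chosen for
 * illustration): with mem_unit == 4096 (one 4 KiB page) and
 * totalram == 2097152 pages (8 GiB), the loop runs 12 times, so
 * bitcount == 12 and totalram becomes 2097152 << 12 == 8589934592, i.e.
 * the same amount expressed in bytes with mem_unit set to 1, which is
 * what 2.2.x-era userspace expects. If the combined ram+swap total would
 * overflow an unsigned long at any point, the function bails out and the
 * values stay in units of mem_unit instead.
 */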
2510
2511 SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
2512 {
2513 struct sysinfo val;
2514
2515 do_sysinfo(&val);
2516
2517 if (copy_to_user(info, &val, sizeof(struct sysinfo)))
2518 return -EFAULT;
2519
2520 return 0;
2521 }
2522
2523 #ifdef CONFIG_COMPAT
2524 struct compat_sysinfo {
2525 s32 uptime;
2526 u32 loads[3];
2527 u32 totalram;
2528 u32 freeram;
2529 u32 sharedram;
2530 u32 bufferram;
2531 u32 totalswap;
2532 u32 freeswap;
2533 u16 procs;
2534 u16 pad;
2535 u32 totalhigh;
2536 u32 freehigh;
2537 u32 mem_unit;
2538 char _f[20-2*sizeof(u32)-sizeof(int)];
2539 };
2540
2541 COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info)
2542 {
2543 struct sysinfo s;
2544
2545 do_sysinfo(&s);
2546
2547 /* Check to see if any memory value is too large for 32-bit and scale
2548 * down if needed
2549 */
2550 if (upper_32_bits(s.totalram) || upper_32_bits(s.totalswap)) {
2551 int bitcount = 0;
2552
2553 while (s.mem_unit < PAGE_SIZE) {
2554 s.mem_unit <<= 1;
2555 bitcount++;
2556 }
2557
2558 s.totalram >>= bitcount;
2559 s.freeram >>= bitcount;
2560 s.sharedram >>= bitcount;
2561 s.bufferram >>= bitcount;
2562 s.totalswap >>= bitcount;
2563 s.freeswap >>= bitcount;
2564 s.totalhigh >>= bitcount;
2565 s.freehigh >>= bitcount;
2566 }
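/*
 * Worked example of the scale-down above (numbers chosen for
 * illustration): with 8 GiB of RAM, mem_unit == 1 and
 * totalram == 8589934592, which does not fit in the compat structure's
 * u32 fields. The loop raises mem_unit to PAGE_SIZE (4096 on most
 * architectures), giving bitcount == 12, and totalram >> 12 == 2097152,
 * the same amount expressed in 4 KiB units, which fits in 32 bits.
 */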
2567
2568 if (!access_ok(VERIFY_WRITE, info, sizeof(struct compat_sysinfo)) ||
2569 __put_user(s.uptime, &info->uptime) ||
2570 __put_user(s.loads[0], &info->loads[0]) ||
2571 __put_user(s.loads[1], &info->loads[1]) ||
2572 __put_user(s.loads[2], &info->loads[2]) ||
2573 __put_user(s.totalram, &info->totalram) ||
2574 __put_user(s.freeram, &info->freeram) ||
2575 __put_user(s.sharedram, &info->sharedram) ||
2576 __put_user(s.bufferram, &info->bufferram) ||
2577 __put_user(s.totalswap, &info->totalswap) ||
2578 __put_user(s.freeswap, &info->freeswap) ||
2579 __put_user(s.procs, &info->procs) ||
2580 __put_user(s.totalhigh, &info->totalhigh) ||
2581 __put_user(s.freehigh, &info->freehigh) ||
2582 __put_user(s.mem_unit, &info->mem_unit))
2583 return -EFAULT;
2584
2585 return 0;
2586 }
2587 #endif /* CONFIG_COMPAT */
2588