// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/kernel/sys.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

8#include <linux/export.h>
9#include <linux/mm.h>
10#include <linux/utsname.h>
11#include <linux/mman.h>
12#include <linux/reboot.h>
13#include <linux/prctl.h>
14#include <linux/highuid.h>
15#include <linux/fs.h>
16#include <linux/kmod.h>
17#include <linux/perf_event.h>
18#include <linux/resource.h>
19#include <linux/kernel.h>
20#include <linux/workqueue.h>
21#include <linux/capability.h>
22#include <linux/device.h>
23#include <linux/key.h>
24#include <linux/times.h>
25#include <linux/posix-timers.h>
26#include <linux/security.h>
27#include <linux/dcookies.h>
28#include <linux/suspend.h>
29#include <linux/tty.h>
30#include <linux/signal.h>
31#include <linux/cn_proc.h>
32#include <linux/getcpu.h>
33#include <linux/task_io_accounting_ops.h>
34#include <linux/seccomp.h>
35#include <linux/cpu.h>
36#include <linux/personality.h>
37#include <linux/ptrace.h>
38#include <linux/fs_struct.h>
39#include <linux/file.h>
40#include <linux/mount.h>
41#include <linux/gfp.h>
42#include <linux/syscore_ops.h>
43#include <linux/version.h>
44#include <linux/ctype.h>
45
46#include <linux/compat.h>
47#include <linux/syscalls.h>
48#include <linux/kprobes.h>
49#include <linux/user_namespace.h>
50#include <linux/time_namespace.h>
51#include <linux/binfmts.h>
52
53#include <linux/sched.h>
54#include <linux/sched/autogroup.h>
55#include <linux/sched/loadavg.h>
56#include <linux/sched/stat.h>
57#include <linux/sched/mm.h>
58#include <linux/sched/coredump.h>
59#include <linux/sched/task.h>
60#include <linux/sched/cputime.h>
61#include <linux/rcupdate.h>
62#include <linux/uidgid.h>
63#include <linux/cred.h>
64
65#include <linux/nospec.h>
66
67#include <linux/kmsg_dump.h>
68
69#include <generated/utsrelease.h>
70
71#include <linux/uaccess.h>
72#include <asm/io.h>
73#include <asm/unistd.h>
77
78#include "uid16.h"
79
80#ifndef SET_UNALIGN_CTL
81# define SET_UNALIGN_CTL(a, b) (-EINVAL)
82#endif
83#ifndef GET_UNALIGN_CTL
84# define GET_UNALIGN_CTL(a, b) (-EINVAL)
85#endif
86#ifndef SET_FPEMU_CTL
87# define SET_FPEMU_CTL(a, b) (-EINVAL)
88#endif
89#ifndef GET_FPEMU_CTL
90# define GET_FPEMU_CTL(a, b) (-EINVAL)
91#endif
92#ifndef SET_FPEXC_CTL
93# define SET_FPEXC_CTL(a, b) (-EINVAL)
94#endif
95#ifndef GET_FPEXC_CTL
96# define GET_FPEXC_CTL(a, b) (-EINVAL)
97#endif
98#ifndef GET_ENDIAN
99# define GET_ENDIAN(a, b) (-EINVAL)
100#endif
101#ifndef SET_ENDIAN
102# define SET_ENDIAN(a, b) (-EINVAL)
103#endif
104#ifndef GET_TSC_CTL
105# define GET_TSC_CTL(a) (-EINVAL)
106#endif
107#ifndef SET_TSC_CTL
108# define SET_TSC_CTL(a) (-EINVAL)
109#endif
110#ifndef MPX_ENABLE_MANAGEMENT
111# define MPX_ENABLE_MANAGEMENT() (-EINVAL)
112#endif
113#ifndef MPX_DISABLE_MANAGEMENT
114# define MPX_DISABLE_MANAGEMENT() (-EINVAL)
115#endif
116#ifndef GET_FP_MODE
117# define GET_FP_MODE(a) (-EINVAL)
118#endif
119#ifndef SET_FP_MODE
120# define SET_FP_MODE(a,b) (-EINVAL)
121#endif
122#ifndef SVE_SET_VL
123# define SVE_SET_VL(a) (-EINVAL)
124#endif
125#ifndef SVE_GET_VL
126# define SVE_GET_VL() (-EINVAL)
127#endif
128#ifndef PAC_RESET_KEYS
129# define PAC_RESET_KEYS(a, b) (-EINVAL)
130#endif
131
/*
 * This is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UIDs/GIDs but didn't in the past.
 */

137int overflowuid = DEFAULT_OVERFLOWUID;
138int overflowgid = DEFAULT_OVERFLOWGID;
139
140EXPORT_SYMBOL(overflowuid);
141EXPORT_SYMBOL(overflowgid);
142
/*
 * The same, but for the filesystem overflow UID and GID, used when a
 * filesystem cannot store UIDs/GIDs wider than 16 bits.
 */

148int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
149int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;
150
151EXPORT_SYMBOL(fs_overflowuid);
152EXPORT_SYMBOL(fs_overflowgid);
153

/*
 * Returns true if current's euid is the same as p's uid or euid,
 * or if current has CAP_SYS_NICE over p's user namespace.
 *
 * Called with the RCU read lock held, so the task's creds are stable.
 */
160static bool set_one_prio_perm(struct task_struct *p)
161{
162 const struct cred *cred = current_cred(), *pcred = __task_cred(p);
163
164 if (uid_eq(pcred->uid, cred->euid) ||
165 uid_eq(pcred->euid, cred->euid))
166 return true;
167 if (ns_capable(pcred->user_ns, CAP_SYS_NICE))
168 return true;
169 return false;
170}
171
/*
 * Set the priority of a single task; the caller holds the RCU read lock
 * and tasklist_lock.
 */
176static int set_one_prio(struct task_struct *p, int niceval, int error)
177{
178 int no_nice;
179
180 if (!set_one_prio_perm(p)) {
181 error = -EPERM;
182 goto out;
183 }
184 if (niceval < task_nice(p) && !can_nice(p, niceval)) {
185 error = -EACCES;
186 goto out;
187 }
188 no_nice = security_task_setnice(p, niceval);
189 if (no_nice) {
190 error = no_nice;
191 goto out;
192 }
193 if (error == -ESRCH)
194 error = 0;
195 set_user_nice(p, niceval);
196out:
197 return error;
198}
199
200SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
201{
202 struct task_struct *g, *p;
203 struct user_struct *user;
204 const struct cred *cred = current_cred();
205 int error = -EINVAL;
206 struct pid *pgrp;
207 kuid_t uid;
208
209 if (which > PRIO_USER || which < PRIO_PROCESS)
210 goto out;
211
212
213 error = -ESRCH;
214 if (niceval < MIN_NICE)
215 niceval = MIN_NICE;
216 if (niceval > MAX_NICE)
217 niceval = MAX_NICE;
218
219 rcu_read_lock();
220 read_lock(&tasklist_lock);
221 switch (which) {
222 case PRIO_PROCESS:
223 if (who)
224 p = find_task_by_vpid(who);
225 else
226 p = current;
227 if (p)
228 error = set_one_prio(p, niceval, error);
229 break;
230 case PRIO_PGRP:
231 if (who)
232 pgrp = find_vpid(who);
233 else
234 pgrp = task_pgrp(current);
235 do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
236 error = set_one_prio(p, niceval, error);
237 } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
238 break;
239 case PRIO_USER:
240 uid = make_kuid(cred->user_ns, who);
241 user = cred->user;
242 if (!who)
243 uid = cred->uid;
244 else if (!uid_eq(uid, cred->uid)) {
245 user = find_user(uid);
246 if (!user)
247 goto out_unlock;
248 }
249 do_each_thread(g, p) {
250 if (uid_eq(task_uid(p), uid) && task_pid_vnr(p))
251 error = set_one_prio(p, niceval, error);
252 } while_each_thread(g, p);
253 if (!uid_eq(uid, cred->uid))
254 free_uid(user);
255 break;
256 }
257out_unlock:
258 read_unlock(&tasklist_lock);
259 rcu_read_unlock();
260out:
261 return error;
262}
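
/*
 * Example (illustrative userspace usage, not part of this file): renicing
 * the calling process through the glibc setpriority() wrapper.  A minimal
 * sketch; error handling is reduced to perror().
 *
 *	#include <sys/resource.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		// who == 0 selects the calling process for PRIO_PROCESS
 *		if (setpriority(PRIO_PROCESS, 0, 10) == -1)
 *			perror("setpriority");
 *		return 0;
 *	}
 *
 * Raising priority (a more negative nice value) needs CAP_SYS_NICE or an
 * adequate RLIMIT_NICE, mirroring the can_nice() check above.
 */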
263
264
265
266
267
268
269
270SYSCALL_DEFINE2(getpriority, int, which, int, who)
271{
272 struct task_struct *g, *p;
273 struct user_struct *user;
274 const struct cred *cred = current_cred();
275 long niceval, retval = -ESRCH;
276 struct pid *pgrp;
277 kuid_t uid;
278
279 if (which > PRIO_USER || which < PRIO_PROCESS)
280 return -EINVAL;
281
282 rcu_read_lock();
283 read_lock(&tasklist_lock);
284 switch (which) {
285 case PRIO_PROCESS:
286 if (who)
287 p = find_task_by_vpid(who);
288 else
289 p = current;
290 if (p) {
291 niceval = nice_to_rlimit(task_nice(p));
292 if (niceval > retval)
293 retval = niceval;
294 }
295 break;
296 case PRIO_PGRP:
297 if (who)
298 pgrp = find_vpid(who);
299 else
300 pgrp = task_pgrp(current);
301 do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
302 niceval = nice_to_rlimit(task_nice(p));
303 if (niceval > retval)
304 retval = niceval;
305 } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
306 break;
307 case PRIO_USER:
308 uid = make_kuid(cred->user_ns, who);
309 user = cred->user;
310 if (!who)
311 uid = cred->uid;
312 else if (!uid_eq(uid, cred->uid)) {
313 user = find_user(uid);
314 if (!user)
315 goto out_unlock;
316 }
317 do_each_thread(g, p) {
318 if (uid_eq(task_uid(p), uid) && task_pid_vnr(p)) {
319 niceval = nice_to_rlimit(task_nice(p));
320 if (niceval > retval)
321 retval = niceval;
322 }
323 } while_each_thread(g, p);
324 if (!uid_eq(uid, cred->uid))
325 free_uid(user);
326 break;
327 }
328out_unlock:
329 read_unlock(&tasklist_lock);
330 rcu_read_unlock();
331
332 return retval;
333}
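
/*
 * Example (illustrative, not part of this file): the syscall above returns
 * the biased value nice_to_rlimit(nice), i.e. 20 - nice (always >= 1), so
 * that negative numbers stay free for error codes.  The glibc wrapper
 * converts back to the conventional -20..19 range, which means a caller
 * must clear errno to tell a legitimate -1 result from an error:
 *
 *	#include <sys/resource.h>
 *	#include <errno.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		errno = 0;
 *		int prio = getpriority(PRIO_PROCESS, 0);
 *
 *		if (prio == -1 && errno != 0)
 *			perror("getpriority");
 *		else
 *			printf("nice value: %d\n", prio);
 *		return 0;
 *	}
 */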
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353#ifdef CONFIG_MULTIUSER
354long __sys_setregid(gid_t rgid, gid_t egid)
355{
356 struct user_namespace *ns = current_user_ns();
357 const struct cred *old;
358 struct cred *new;
359 int retval;
360 kgid_t krgid, kegid;
361
362 krgid = make_kgid(ns, rgid);
363 kegid = make_kgid(ns, egid);
364
365 if ((rgid != (gid_t) -1) && !gid_valid(krgid))
366 return -EINVAL;
367 if ((egid != (gid_t) -1) && !gid_valid(kegid))
368 return -EINVAL;
369
370 new = prepare_creds();
371 if (!new)
372 return -ENOMEM;
373 old = current_cred();
374
375 retval = -EPERM;
376 if (rgid != (gid_t) -1) {
377 if (gid_eq(old->gid, krgid) ||
378 gid_eq(old->egid, krgid) ||
379 ns_capable(old->user_ns, CAP_SETGID))
380 new->gid = krgid;
381 else
382 goto error;
383 }
384 if (egid != (gid_t) -1) {
385 if (gid_eq(old->gid, kegid) ||
386 gid_eq(old->egid, kegid) ||
387 gid_eq(old->sgid, kegid) ||
388 ns_capable(old->user_ns, CAP_SETGID))
389 new->egid = kegid;
390 else
391 goto error;
392 }
393
394 if (rgid != (gid_t) -1 ||
395 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
396 new->sgid = new->egid;
397 new->fsgid = new->egid;
398
399 return commit_creds(new);
400
401error:
402 abort_creds(new);
403 return retval;
404}
405
406SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
407{
408 return __sys_setregid(rgid, egid);
409}
410
411
412
413
414
415
416long __sys_setgid(gid_t gid)
417{
418 struct user_namespace *ns = current_user_ns();
419 const struct cred *old;
420 struct cred *new;
421 int retval;
422 kgid_t kgid;
423
424 kgid = make_kgid(ns, gid);
425 if (!gid_valid(kgid))
426 return -EINVAL;
427
428 new = prepare_creds();
429 if (!new)
430 return -ENOMEM;
431 old = current_cred();
432
433 retval = -EPERM;
434 if (ns_capable(old->user_ns, CAP_SETGID))
435 new->gid = new->egid = new->sgid = new->fsgid = kgid;
436 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
437 new->egid = new->fsgid = kgid;
438 else
439 goto error;
440
441 return commit_creds(new);
442
443error:
444 abort_creds(new);
445 return retval;
446}
447
448SYSCALL_DEFINE1(setgid, gid_t, gid)
449{
450 return __sys_setgid(gid);
451}
452
453
454
455
456static int set_user(struct cred *new)
457{
458 struct user_struct *new_user;
459
460 new_user = alloc_uid(new->uid);
461 if (!new_user)
462 return -EAGAIN;
463
	/*
	 * Don't fail here when RLIMIT_NPROC is exceeded: too many callers
	 * of set*uid() ignore the return value and would keep running with
	 * their old credentials.  Record the excess in PF_NPROC_EXCEEDED
	 * instead and let a later execve() enforce the limit.
	 */
471 if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
472 new_user != INIT_USER)
473 current->flags |= PF_NPROC_EXCEEDED;
474 else
475 current->flags &= ~PF_NPROC_EXCEEDED;
476
477 free_uid(new->user);
478 new->user = new_user;
479 return 0;
480}
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497long __sys_setreuid(uid_t ruid, uid_t euid)
498{
499 struct user_namespace *ns = current_user_ns();
500 const struct cred *old;
501 struct cred *new;
502 int retval;
503 kuid_t kruid, keuid;
504
505 kruid = make_kuid(ns, ruid);
506 keuid = make_kuid(ns, euid);
507
508 if ((ruid != (uid_t) -1) && !uid_valid(kruid))
509 return -EINVAL;
510 if ((euid != (uid_t) -1) && !uid_valid(keuid))
511 return -EINVAL;
512
513 new = prepare_creds();
514 if (!new)
515 return -ENOMEM;
516 old = current_cred();
517
518 retval = -EPERM;
519 if (ruid != (uid_t) -1) {
520 new->uid = kruid;
521 if (!uid_eq(old->uid, kruid) &&
522 !uid_eq(old->euid, kruid) &&
523 !ns_capable(old->user_ns, CAP_SETUID))
524 goto error;
525 }
526
527 if (euid != (uid_t) -1) {
528 new->euid = keuid;
529 if (!uid_eq(old->uid, keuid) &&
530 !uid_eq(old->euid, keuid) &&
531 !uid_eq(old->suid, keuid) &&
532 !ns_capable(old->user_ns, CAP_SETUID))
533 goto error;
534 }
535
536 if (!uid_eq(new->uid, old->uid)) {
537 retval = set_user(new);
538 if (retval < 0)
539 goto error;
540 }
541 if (ruid != (uid_t) -1 ||
542 (euid != (uid_t) -1 && !uid_eq(keuid, old->uid)))
543 new->suid = new->euid;
544 new->fsuid = new->euid;
545
546 retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
547 if (retval < 0)
548 goto error;
549
550 return commit_creds(new);
551
552error:
553 abort_creds(new);
554 return retval;
555}
556
557SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
558{
559 return __sys_setreuid(ruid, euid);
560}
561
562
563
564
565
566
567
568
569
570
571
572
573long __sys_setuid(uid_t uid)
574{
575 struct user_namespace *ns = current_user_ns();
576 const struct cred *old;
577 struct cred *new;
578 int retval;
579 kuid_t kuid;
580
581 kuid = make_kuid(ns, uid);
582 if (!uid_valid(kuid))
583 return -EINVAL;
584
585 new = prepare_creds();
586 if (!new)
587 return -ENOMEM;
588 old = current_cred();
589
590 retval = -EPERM;
591 if (ns_capable(old->user_ns, CAP_SETUID)) {
592 new->suid = new->uid = kuid;
593 if (!uid_eq(kuid, old->uid)) {
594 retval = set_user(new);
595 if (retval < 0)
596 goto error;
597 }
598 } else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
599 goto error;
600 }
601
602 new->fsuid = new->euid = kuid;
603
604 retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
605 if (retval < 0)
606 goto error;
607
608 return commit_creds(new);
609
610error:
611 abort_creds(new);
612 return retval;
613}
614
615SYSCALL_DEFINE1(setuid, uid_t, uid)
616{
617 return __sys_setuid(uid);
618}
619
620
621
622
623
624
625long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
626{
627 struct user_namespace *ns = current_user_ns();
628 const struct cred *old;
629 struct cred *new;
630 int retval;
631 kuid_t kruid, keuid, ksuid;
632
633 kruid = make_kuid(ns, ruid);
634 keuid = make_kuid(ns, euid);
635 ksuid = make_kuid(ns, suid);
636
637 if ((ruid != (uid_t) -1) && !uid_valid(kruid))
638 return -EINVAL;
639
640 if ((euid != (uid_t) -1) && !uid_valid(keuid))
641 return -EINVAL;
642
643 if ((suid != (uid_t) -1) && !uid_valid(ksuid))
644 return -EINVAL;
645
646 new = prepare_creds();
647 if (!new)
648 return -ENOMEM;
649
650 old = current_cred();
651
652 retval = -EPERM;
653 if (!ns_capable(old->user_ns, CAP_SETUID)) {
654 if (ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) &&
655 !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
656 goto error;
657 if (euid != (uid_t) -1 && !uid_eq(keuid, old->uid) &&
658 !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
659 goto error;
660 if (suid != (uid_t) -1 && !uid_eq(ksuid, old->uid) &&
661 !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
662 goto error;
663 }
664
665 if (ruid != (uid_t) -1) {
666 new->uid = kruid;
667 if (!uid_eq(kruid, old->uid)) {
668 retval = set_user(new);
669 if (retval < 0)
670 goto error;
671 }
672 }
673 if (euid != (uid_t) -1)
674 new->euid = keuid;
675 if (suid != (uid_t) -1)
676 new->suid = ksuid;
677 new->fsuid = new->euid;
678
679 retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
680 if (retval < 0)
681 goto error;
682
683 return commit_creds(new);
684
685error:
686 abort_creds(new);
687 return retval;
688}
689
690SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
691{
692 return __sys_setresuid(ruid, euid, suid);
693}
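
/*
 * Example (illustrative, not part of this file): permanently dropping
 * privileges from a process that started as root, then verifying the drop
 * with getresuid().  A sketch; uid 1000 is a placeholder for the target
 * unprivileged user.
 *
 *	#define _GNU_SOURCE
 *	#include <unistd.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	int main(void)
 *	{
 *		uid_t ruid, euid, suid;
 *
 *		// Setting all three ids leaves no saved uid to switch back to.
 *		if (setresuid(1000, 1000, 1000) == -1) {
 *			perror("setresuid");
 *			exit(1);
 *		}
 *		if (getresuid(&ruid, &euid, &suid) == -1) {
 *			perror("getresuid");
 *			exit(1);
 *		}
 *		printf("ruid=%u euid=%u suid=%u\n",
 *		       (unsigned)ruid, (unsigned)euid, (unsigned)suid);
 *		return 0;
 *	}
 */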
694
695SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp)
696{
697 const struct cred *cred = current_cred();
698 int retval;
699 uid_t ruid, euid, suid;
700
701 ruid = from_kuid_munged(cred->user_ns, cred->uid);
702 euid = from_kuid_munged(cred->user_ns, cred->euid);
703 suid = from_kuid_munged(cred->user_ns, cred->suid);
704
705 retval = put_user(ruid, ruidp);
706 if (!retval) {
707 retval = put_user(euid, euidp);
708 if (!retval)
709 return put_user(suid, suidp);
710 }
711 return retval;
712}
713
714
715
716
717long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
718{
719 struct user_namespace *ns = current_user_ns();
720 const struct cred *old;
721 struct cred *new;
722 int retval;
723 kgid_t krgid, kegid, ksgid;
724
725 krgid = make_kgid(ns, rgid);
726 kegid = make_kgid(ns, egid);
727 ksgid = make_kgid(ns, sgid);
728
729 if ((rgid != (gid_t) -1) && !gid_valid(krgid))
730 return -EINVAL;
731 if ((egid != (gid_t) -1) && !gid_valid(kegid))
732 return -EINVAL;
733 if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
734 return -EINVAL;
735
736 new = prepare_creds();
737 if (!new)
738 return -ENOMEM;
739 old = current_cred();
740
741 retval = -EPERM;
742 if (!ns_capable(old->user_ns, CAP_SETGID)) {
743 if (rgid != (gid_t) -1 && !gid_eq(krgid, old->gid) &&
744 !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
745 goto error;
746 if (egid != (gid_t) -1 && !gid_eq(kegid, old->gid) &&
747 !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
748 goto error;
749 if (sgid != (gid_t) -1 && !gid_eq(ksgid, old->gid) &&
750 !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
751 goto error;
752 }
753
754 if (rgid != (gid_t) -1)
755 new->gid = krgid;
756 if (egid != (gid_t) -1)
757 new->egid = kegid;
758 if (sgid != (gid_t) -1)
759 new->sgid = ksgid;
760 new->fsgid = new->egid;
761
762 return commit_creds(new);
763
764error:
765 abort_creds(new);
766 return retval;
767}
768
769SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
770{
771 return __sys_setresgid(rgid, egid, sgid);
772}
773
774SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp)
775{
776 const struct cred *cred = current_cred();
777 int retval;
778 gid_t rgid, egid, sgid;
779
780 rgid = from_kgid_munged(cred->user_ns, cred->gid);
781 egid = from_kgid_munged(cred->user_ns, cred->egid);
782 sgid = from_kgid_munged(cred->user_ns, cred->sgid);
783
784 retval = put_user(rgid, rgidp);
785 if (!retval) {
786 retval = put_user(egid, egidp);
787 if (!retval)
788 retval = put_user(sgid, sgidp);
789 }
790
791 return retval;
792}
793
794
/*
 * setfsuid() sets the fsuid - the uid used for filesystem permission
 * checks.  It lets servers such as nfsd act as another user for
 * filesystem access without changing their real or effective uid.
 * It normally follows the euid unless explicitly set.
 */
801long __sys_setfsuid(uid_t uid)
802{
803 const struct cred *old;
804 struct cred *new;
805 uid_t old_fsuid;
806 kuid_t kuid;
807
808 old = current_cred();
809 old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);
810
811 kuid = make_kuid(old->user_ns, uid);
812 if (!uid_valid(kuid))
813 return old_fsuid;
814
815 new = prepare_creds();
816 if (!new)
817 return old_fsuid;
818
819 if (uid_eq(kuid, old->uid) || uid_eq(kuid, old->euid) ||
820 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
821 ns_capable(old->user_ns, CAP_SETUID)) {
822 if (!uid_eq(kuid, old->fsuid)) {
823 new->fsuid = kuid;
824 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
825 goto change_okay;
826 }
827 }
828
829 abort_creds(new);
830 return old_fsuid;
831
832change_okay:
833 commit_creds(new);
834 return old_fsuid;
835}
836
837SYSCALL_DEFINE1(setfsuid, uid_t, uid)
838{
839 return __sys_setfsuid(uid);
840}
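
/*
 * Example (illustrative, not part of this file): setfsuid() never reports
 * an error, only the previous fsuid, so the usual userspace idiom is to
 * call it a second time and compare.  A sketch assuming glibc's setfsuid()
 * wrapper; change_fsuid() is a hypothetical helper name.
 *
 *	#include <sys/fsuid.h>
 *
 *	int change_fsuid(uid_t fsuid)
 *	{
 *		setfsuid(fsuid);
 *		// A second call returns the current fsuid; if it is not the
 *		// one we asked for, the first call was silently refused.
 *		if ((uid_t)setfsuid(fsuid) != fsuid)
 *			return -1;
 *		return 0;
 *	}
 */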
841
/*
 * The same as setfsuid(), but for the filesystem gid.
 */
845long __sys_setfsgid(gid_t gid)
846{
847 const struct cred *old;
848 struct cred *new;
849 gid_t old_fsgid;
850 kgid_t kgid;
851
852 old = current_cred();
853 old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);
854
855 kgid = make_kgid(old->user_ns, gid);
856 if (!gid_valid(kgid))
857 return old_fsgid;
858
859 new = prepare_creds();
860 if (!new)
861 return old_fsgid;
862
863 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
864 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
865 ns_capable(old->user_ns, CAP_SETGID)) {
866 if (!gid_eq(kgid, old->fsgid)) {
867 new->fsgid = kgid;
868 goto change_okay;
869 }
870 }
871
872 abort_creds(new);
873 return old_fsgid;
874
875change_okay:
876 commit_creds(new);
877 return old_fsgid;
878}
879
880SYSCALL_DEFINE1(setfsgid, gid_t, gid)
881{
882 return __sys_setfsgid(gid);
883}
884#endif
885
886
887
888
889
890
891
892
893
894
895SYSCALL_DEFINE0(getpid)
896{
897 return task_tgid_vnr(current);
898}
899
900
901SYSCALL_DEFINE0(gettid)
902{
903 return task_pid_vnr(current);
904}
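
/*
 * Example (illustrative, not part of this file): older glibc versions do
 * not wrap gettid(), so it is commonly invoked through syscall(2).  A
 * sketch:
 *
 *	#define _GNU_SOURCE
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		pid_t tid = syscall(SYS_gettid);
 *
 *		// For the main thread this equals getpid(); other threads of
 *		// the same process share the pid but get distinct tids.
 *		printf("pid=%d tid=%d\n", (int)getpid(), (int)tid);
 *		return 0;
 *	}
 */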
905
906
907
908
909
910
911
912SYSCALL_DEFINE0(getppid)
913{
914 int pid;
915
916 rcu_read_lock();
917 pid = task_tgid_vnr(rcu_dereference(current->real_parent));
918 rcu_read_unlock();
919
920 return pid;
921}
922
923SYSCALL_DEFINE0(getuid)
924{
925
926 return from_kuid_munged(current_user_ns(), current_uid());
927}
928
929SYSCALL_DEFINE0(geteuid)
930{
931
932 return from_kuid_munged(current_user_ns(), current_euid());
933}
934
935SYSCALL_DEFINE0(getgid)
936{
937
938 return from_kgid_munged(current_user_ns(), current_gid());
939}
940
941SYSCALL_DEFINE0(getegid)
942{
943
944 return from_kgid_munged(current_user_ns(), current_egid());
945}
946
947static void do_sys_times(struct tms *tms)
948{
949 u64 tgutime, tgstime, cutime, cstime;
950
951 thread_group_cputime_adjusted(current, &tgutime, &tgstime);
952 cutime = current->signal->cutime;
953 cstime = current->signal->cstime;
954 tms->tms_utime = nsec_to_clock_t(tgutime);
955 tms->tms_stime = nsec_to_clock_t(tgstime);
956 tms->tms_cutime = nsec_to_clock_t(cutime);
957 tms->tms_cstime = nsec_to_clock_t(cstime);
958}
959
960SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
961{
962 if (tbuf) {
963 struct tms tmp;
964
965 do_sys_times(&tmp);
966 if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
967 return -EFAULT;
968 }
969 force_successful_syscall_return();
970 return (long) jiffies_64_to_clock_t(get_jiffies_64());
971}
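
/*
 * Example (illustrative, not part of this file): the value returned by
 * times() and the fields of struct tms are expressed in clock ticks, so
 * userspace divides by sysconf(_SC_CLK_TCK) to get seconds.  A sketch:
 *
 *	#include <sys/times.h>
 *	#include <unistd.h>
 *	#include <stdio.h>
 *
 *	void report_cpu_time(void)
 *	{
 *		struct tms t;
 *		long hz = sysconf(_SC_CLK_TCK);
 *
 *		if (times(&t) == (clock_t)-1 || hz <= 0)
 *			return;
 *		printf("user %.2fs, system %.2fs\n",
 *		       (double)t.tms_utime / hz,
 *		       (double)t.tms_stime / hz);
 *	}
 */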
972
973#ifdef CONFIG_COMPAT
974static compat_clock_t clock_t_to_compat_clock_t(clock_t x)
975{
976 return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
977}
978
979COMPAT_SYSCALL_DEFINE1(times, struct compat_tms __user *, tbuf)
980{
981 if (tbuf) {
982 struct tms tms;
983 struct compat_tms tmp;
984
985 do_sys_times(&tms);
986
987 tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime);
988 tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime);
989 tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime);
990 tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime);
991 if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
992 return -EFAULT;
993 }
994 force_successful_syscall_return();
995 return compat_jiffies_to_clock_t(jiffies);
996}
997#endif
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
1011{
1012 struct task_struct *p;
1013 struct task_struct *group_leader = current->group_leader;
1014 struct pid *pgrp;
1015 int err;
1016
1017 if (!pid)
1018 pid = task_pid_vnr(group_leader);
1019 if (!pgid)
1020 pgid = pid;
1021 if (pgid < 0)
1022 return -EINVAL;
1023 rcu_read_lock();
1024
1025
1026
1027
1028 write_lock_irq(&tasklist_lock);
1029
1030 err = -ESRCH;
1031 p = find_task_by_vpid(pid);
1032 if (!p)
1033 goto out;
1034
1035 err = -EINVAL;
1036 if (!thread_group_leader(p))
1037 goto out;
1038
1039 if (same_thread_group(p->real_parent, group_leader)) {
1040 err = -EPERM;
1041 if (task_session(p) != task_session(group_leader))
1042 goto out;
1043 err = -EACCES;
1044 if (!(p->flags & PF_FORKNOEXEC))
1045 goto out;
1046 } else {
1047 err = -ESRCH;
1048 if (p != group_leader)
1049 goto out;
1050 }
1051
1052 err = -EPERM;
1053 if (p->signal->leader)
1054 goto out;
1055
1056 pgrp = task_pid(p);
1057 if (pgid != pid) {
1058 struct task_struct *g;
1059
1060 pgrp = find_vpid(pgid);
1061 g = pid_task(pgrp, PIDTYPE_PGID);
1062 if (!g || task_session(g) != task_session(group_leader))
1063 goto out;
1064 }
1065
1066 err = security_task_setpgid(p, pgid);
1067 if (err)
1068 goto out;
1069
1070 if (task_pgrp(p) != pgrp)
1071 change_pid(p, PIDTYPE_PGID, pgrp);
1072
1073 err = 0;
1074out:
1075
1076 write_unlock_irq(&tasklist_lock);
1077 rcu_read_unlock();
1078 return err;
1079}
1080
1081static int do_getpgid(pid_t pid)
1082{
1083 struct task_struct *p;
1084 struct pid *grp;
1085 int retval;
1086
1087 rcu_read_lock();
1088 if (!pid)
1089 grp = task_pgrp(current);
1090 else {
1091 retval = -ESRCH;
1092 p = find_task_by_vpid(pid);
1093 if (!p)
1094 goto out;
1095 grp = task_pgrp(p);
1096 if (!grp)
1097 goto out;
1098
1099 retval = security_task_getpgid(p);
1100 if (retval)
1101 goto out;
1102 }
1103 retval = pid_vnr(grp);
1104out:
1105 rcu_read_unlock();
1106 return retval;
1107}
1108
1109SYSCALL_DEFINE1(getpgid, pid_t, pid)
1110{
1111 return do_getpgid(pid);
1112}
1113
1114#ifdef __ARCH_WANT_SYS_GETPGRP
1115
1116SYSCALL_DEFINE0(getpgrp)
1117{
1118 return do_getpgid(0);
1119}
1120
1121#endif
1122
1123SYSCALL_DEFINE1(getsid, pid_t, pid)
1124{
1125 struct task_struct *p;
1126 struct pid *sid;
1127 int retval;
1128
1129 rcu_read_lock();
1130 if (!pid)
1131 sid = task_session(current);
1132 else {
1133 retval = -ESRCH;
1134 p = find_task_by_vpid(pid);
1135 if (!p)
1136 goto out;
1137 sid = task_session(p);
1138 if (!sid)
1139 goto out;
1140
1141 retval = security_task_getsid(p);
1142 if (retval)
1143 goto out;
1144 }
1145 retval = pid_vnr(sid);
1146out:
1147 rcu_read_unlock();
1148 return retval;
1149}
1150
1151static void set_special_pids(struct pid *pid)
1152{
1153 struct task_struct *curr = current->group_leader;
1154
1155 if (task_session(curr) != pid)
1156 change_pid(curr, PIDTYPE_SID, pid);
1157
1158 if (task_pgrp(curr) != pid)
1159 change_pid(curr, PIDTYPE_PGID, pid);
1160}
1161
1162int ksys_setsid(void)
1163{
1164 struct task_struct *group_leader = current->group_leader;
1165 struct pid *sid = task_pid(group_leader);
1166 pid_t session = pid_vnr(sid);
1167 int err = -EPERM;
1168
1169 write_lock_irq(&tasklist_lock);
1170
1171 if (group_leader->signal->leader)
1172 goto out;
1173
1174
1175
1176
1177 if (pid_task(sid, PIDTYPE_PGID))
1178 goto out;
1179
1180 group_leader->signal->leader = 1;
1181 set_special_pids(sid);
1182
1183 proc_clear_tty(group_leader);
1184
1185 err = session;
1186out:
1187 write_unlock_irq(&tasklist_lock);
1188 if (err > 0) {
1189 proc_sid_connector(group_leader);
1190 sched_autogroup_create_attach(group_leader);
1191 }
1192 return err;
1193}
1194
1195SYSCALL_DEFINE0(setsid)
1196{
1197 return ksys_setsid();
1198}
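
/*
 * Example (illustrative, not part of this file): the classic first steps of
 * daemonizing - fork so the child is not a process group leader, then call
 * setsid() to start a new session with no controlling terminal.  A sketch:
 *
 *	#include <unistd.h>
 *	#include <stdlib.h>
 *
 *	void become_session_leader(void)
 *	{
 *		pid_t pid = fork();
 *
 *		if (pid < 0)
 *			exit(1);
 *		if (pid > 0)
 *			exit(0);	// parent exits
 *		// The freshly forked child is not a group leader, so this
 *		// should not trip the pid_task(sid, PIDTYPE_PGID) check.
 *		if (setsid() == (pid_t)-1)
 *			exit(1);
 *	}
 */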
1199
1200DECLARE_RWSEM(uts_sem);
1201
1202#ifdef COMPAT_UTS_MACHINE
1203#define override_architecture(name) \
1204 (personality(current->personality) == PER_LINUX32 && \
1205 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
1206 sizeof(COMPAT_UTS_MACHINE)))
1207#else
1208#define override_architecture(name) 0
1209#endif
1210
/*
 * Work around programs that cannot cope with "Linux 3.0" or later release
 * strings: when the UNAME26 personality bit is set, report a fake 2.6.x
 * release derived from the real patchlevel, keeping any trailing suffix.
 */
1216static int override_release(char __user *release, size_t len)
1217{
1218 int ret = 0;
1219
1220 if (current->personality & UNAME26) {
1221 const char *rest = UTS_RELEASE;
1222 char buf[65] = { 0 };
1223 int ndots = 0;
1224 unsigned v;
1225 size_t copy;
1226
1227 while (*rest) {
1228 if (*rest == '.' && ++ndots >= 3)
1229 break;
1230 if (!isdigit(*rest) && *rest != '.')
1231 break;
1232 rest++;
1233 }
1234 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 60;
1235 copy = clamp_t(size_t, len, 1, sizeof(buf));
1236 copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
1237 ret = copy_to_user(release, buf, copy + 1);
1238 }
1239 return ret;
1240}
1241
1242SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
1243{
1244 int errno = 0;
1245
1246 down_read(&uts_sem);
1247 if (copy_to_user(name, utsname(), sizeof *name))
1248 errno = -EFAULT;
1249 up_read(&uts_sem);
1250
1251 if (!errno && override_release(name->release, sizeof(name->release)))
1252 errno = -EFAULT;
1253 if (!errno && override_architecture(name))
1254 errno = -EFAULT;
1255 return errno;
1256}
1257
1258#ifdef __ARCH_WANT_SYS_OLD_UNAME
1259
1260
1261
1262SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
1263{
1264 int error = 0;
1265
1266 if (!name)
1267 return -EFAULT;
1268
1269 down_read(&uts_sem);
1270 if (copy_to_user(name, utsname(), sizeof(*name)))
1271 error = -EFAULT;
1272 up_read(&uts_sem);
1273
1274 if (!error && override_release(name->release, sizeof(name->release)))
1275 error = -EFAULT;
1276 if (!error && override_architecture(name))
1277 error = -EFAULT;
1278 return error;
1279}
1280
1281SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
1282{
1283 int error;
1284
1285 if (!name)
1286 return -EFAULT;
1287 if (!access_ok(name, sizeof(struct oldold_utsname)))
1288 return -EFAULT;
1289
1290 down_read(&uts_sem);
1291 error = __copy_to_user(&name->sysname, &utsname()->sysname,
1292 __OLD_UTS_LEN);
1293 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
1294 error |= __copy_to_user(&name->nodename, &utsname()->nodename,
1295 __OLD_UTS_LEN);
1296 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
1297 error |= __copy_to_user(&name->release, &utsname()->release,
1298 __OLD_UTS_LEN);
1299 error |= __put_user(0, name->release + __OLD_UTS_LEN);
1300 error |= __copy_to_user(&name->version, &utsname()->version,
1301 __OLD_UTS_LEN);
1302 error |= __put_user(0, name->version + __OLD_UTS_LEN);
1303 error |= __copy_to_user(&name->machine, &utsname()->machine,
1304 __OLD_UTS_LEN);
1305 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
1306 up_read(&uts_sem);
1307
1308 if (!error && override_architecture(name))
1309 error = -EFAULT;
1310 if (!error && override_release(name->release, sizeof(name->release)))
1311 error = -EFAULT;
1312 return error ? -EFAULT : 0;
1313}
1314#endif
1315
1316SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
1317{
1318 int errno;
1319 char tmp[__NEW_UTS_LEN];
1320
1321 if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
1322 return -EPERM;
1323
1324 if (len < 0 || len > __NEW_UTS_LEN)
1325 return -EINVAL;
1326 down_write(&uts_sem);
1327 errno = -EFAULT;
1328 if (!copy_from_user(tmp, name, len)) {
1329 struct new_utsname *u = utsname();
1330
1331 memcpy(u->nodename, tmp, len);
1332 memset(u->nodename + len, 0, sizeof(u->nodename) - len);
1333 errno = 0;
1334 uts_proc_notify(UTS_PROC_HOSTNAME);
1335 }
1336 up_write(&uts_sem);
1337 return errno;
1338}
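
/*
 * Example (illustrative, not part of this file): sethostname() requires
 * CAP_SYS_ADMIN in the UTS namespace's owning user namespace, so a common
 * unprivileged pattern is to unshare new user and UTS namespaces first.
 * A sketch using the glibc wrappers; set_private_hostname() is a
 * hypothetical helper name.
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <unistd.h>
 *	#include <string.h>
 *	#include <stdio.h>
 *
 *	int set_private_hostname(const char *name)
 *	{
 *		if (unshare(CLONE_NEWUTS | CLONE_NEWUSER) == -1) {
 *			perror("unshare");
 *			return -1;
 *		}
 *		// Only the new UTS namespace sees this name.
 *		if (sethostname(name, strlen(name)) == -1) {
 *			perror("sethostname");
 *			return -1;
 *		}
 *		return 0;
 *	}
 */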
1339
1340#ifdef __ARCH_WANT_SYS_GETHOSTNAME
1341
1342SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
1343{
1344 int i, errno;
1345 struct new_utsname *u;
1346
1347 if (len < 0)
1348 return -EINVAL;
1349 down_read(&uts_sem);
1350 u = utsname();
1351 i = 1 + strlen(u->nodename);
1352 if (i > len)
1353 i = len;
1354 errno = 0;
1355 if (copy_to_user(name, u->nodename, i))
1356 errno = -EFAULT;
1357 up_read(&uts_sem);
1358 return errno;
1359}
1360
1361#endif
1362
1363
1364
1365
1366
1367SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
1368{
1369 int errno;
1370 char tmp[__NEW_UTS_LEN];
1371
1372 if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
1373 return -EPERM;
1374 if (len < 0 || len > __NEW_UTS_LEN)
1375 return -EINVAL;
1376
1377 down_write(&uts_sem);
1378 errno = -EFAULT;
1379 if (!copy_from_user(tmp, name, len)) {
1380 struct new_utsname *u = utsname();
1381
1382 memcpy(u->domainname, tmp, len);
1383 memset(u->domainname + len, 0, sizeof(u->domainname) - len);
1384 errno = 0;
1385 uts_proc_notify(UTS_PROC_DOMAINNAME);
1386 }
1387 up_write(&uts_sem);
1388 return errno;
1389}
1390
1391SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1392{
1393 struct rlimit value;
1394 int ret;
1395
1396 ret = do_prlimit(current, resource, NULL, &value);
1397 if (!ret)
1398 ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
1399
1400 return ret;
1401}
1402
1403#ifdef CONFIG_COMPAT
1404
1405COMPAT_SYSCALL_DEFINE2(setrlimit, unsigned int, resource,
1406 struct compat_rlimit __user *, rlim)
1407{
1408 struct rlimit r;
1409 struct compat_rlimit r32;
1410
1411 if (copy_from_user(&r32, rlim, sizeof(struct compat_rlimit)))
1412 return -EFAULT;
1413
1414 if (r32.rlim_cur == COMPAT_RLIM_INFINITY)
1415 r.rlim_cur = RLIM_INFINITY;
1416 else
1417 r.rlim_cur = r32.rlim_cur;
1418 if (r32.rlim_max == COMPAT_RLIM_INFINITY)
1419 r.rlim_max = RLIM_INFINITY;
1420 else
1421 r.rlim_max = r32.rlim_max;
1422 return do_prlimit(current, resource, &r, NULL);
1423}
1424
1425COMPAT_SYSCALL_DEFINE2(getrlimit, unsigned int, resource,
1426 struct compat_rlimit __user *, rlim)
1427{
1428 struct rlimit r;
1429 int ret;
1430
1431 ret = do_prlimit(current, resource, NULL, &r);
1432 if (!ret) {
1433 struct compat_rlimit r32;
1434 if (r.rlim_cur > COMPAT_RLIM_INFINITY)
1435 r32.rlim_cur = COMPAT_RLIM_INFINITY;
1436 else
1437 r32.rlim_cur = r.rlim_cur;
1438 if (r.rlim_max > COMPAT_RLIM_INFINITY)
1439 r32.rlim_max = COMPAT_RLIM_INFINITY;
1440 else
1441 r32.rlim_max = r.rlim_max;
1442
1443 if (copy_to_user(rlim, &r32, sizeof(struct compat_rlimit)))
1444 return -EFAULT;
1445 }
1446 return ret;
1447}
1448
1449#endif
1450
1451#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT
1452
1453
1454
1455
1456SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
1457 struct rlimit __user *, rlim)
1458{
1459 struct rlimit x;
1460 if (resource >= RLIM_NLIMITS)
1461 return -EINVAL;
1462
1463 resource = array_index_nospec(resource, RLIM_NLIMITS);
1464 task_lock(current->group_leader);
1465 x = current->signal->rlim[resource];
1466 task_unlock(current->group_leader);
1467 if (x.rlim_cur > 0x7FFFFFFF)
1468 x.rlim_cur = 0x7FFFFFFF;
1469 if (x.rlim_max > 0x7FFFFFFF)
1470 x.rlim_max = 0x7FFFFFFF;
1471 return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
1472}
1473
1474#ifdef CONFIG_COMPAT
1475COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
1476 struct compat_rlimit __user *, rlim)
1477{
1478 struct rlimit r;
1479
1480 if (resource >= RLIM_NLIMITS)
1481 return -EINVAL;
1482
1483 resource = array_index_nospec(resource, RLIM_NLIMITS);
1484 task_lock(current->group_leader);
1485 r = current->signal->rlim[resource];
1486 task_unlock(current->group_leader);
1487 if (r.rlim_cur > 0x7FFFFFFF)
1488 r.rlim_cur = 0x7FFFFFFF;
1489 if (r.rlim_max > 0x7FFFFFFF)
1490 r.rlim_max = 0x7FFFFFFF;
1491
1492 if (put_user(r.rlim_cur, &rlim->rlim_cur) ||
1493 put_user(r.rlim_max, &rlim->rlim_max))
1494 return -EFAULT;
1495 return 0;
1496}
1497#endif
1498
1499#endif
1500
1501static inline bool rlim64_is_infinity(__u64 rlim64)
1502{
1503#if BITS_PER_LONG < 64
1504 return rlim64 >= ULONG_MAX;
1505#else
1506 return rlim64 == RLIM64_INFINITY;
1507#endif
1508}
1509
1510static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
1511{
1512 if (rlim->rlim_cur == RLIM_INFINITY)
1513 rlim64->rlim_cur = RLIM64_INFINITY;
1514 else
1515 rlim64->rlim_cur = rlim->rlim_cur;
1516 if (rlim->rlim_max == RLIM_INFINITY)
1517 rlim64->rlim_max = RLIM64_INFINITY;
1518 else
1519 rlim64->rlim_max = rlim->rlim_max;
1520}
1521
1522static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
1523{
1524 if (rlim64_is_infinity(rlim64->rlim_cur))
1525 rlim->rlim_cur = RLIM_INFINITY;
1526 else
1527 rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
1528 if (rlim64_is_infinity(rlim64->rlim_max))
1529 rlim->rlim_max = RLIM_INFINITY;
1530 else
1531 rlim->rlim_max = (unsigned long)rlim64->rlim_max;
1532}
1533
1534
1535int do_prlimit(struct task_struct *tsk, unsigned int resource,
1536 struct rlimit *new_rlim, struct rlimit *old_rlim)
1537{
1538 struct rlimit *rlim;
1539 int retval = 0;
1540
1541 if (resource >= RLIM_NLIMITS)
1542 return -EINVAL;
1543 if (new_rlim) {
1544 if (new_rlim->rlim_cur > new_rlim->rlim_max)
1545 return -EINVAL;
1546 if (resource == RLIMIT_NOFILE &&
1547 new_rlim->rlim_max > sysctl_nr_open)
1548 return -EPERM;
1549 }
1550
1551
1552 read_lock(&tasklist_lock);
1553 if (!tsk->sighand) {
1554 retval = -ESRCH;
1555 goto out;
1556 }
1557
1558 rlim = tsk->signal->rlim + resource;
1559 task_lock(tsk->group_leader);
1560 if (new_rlim) {
1561
1562
1563 if (new_rlim->rlim_max > rlim->rlim_max &&
1564 !capable(CAP_SYS_RESOURCE))
1565 retval = -EPERM;
1566 if (!retval)
1567 retval = security_task_setrlimit(tsk, resource, new_rlim);
1568 }
1569 if (!retval) {
1570 if (old_rlim)
1571 *old_rlim = *rlim;
1572 if (new_rlim)
1573 *rlim = *new_rlim;
1574 }
1575 task_unlock(tsk->group_leader);
1576
1577
1578
1579
1580
1581
1582 if (!retval && new_rlim && resource == RLIMIT_CPU &&
1583 new_rlim->rlim_cur != RLIM_INFINITY &&
1584 IS_ENABLED(CONFIG_POSIX_TIMERS))
1585 update_rlimit_cpu(tsk, new_rlim->rlim_cur);
1586out:
1587 read_unlock(&tasklist_lock);
1588 return retval;
1589}
1590
1591
1592static int check_prlimit_permission(struct task_struct *task,
1593 unsigned int flags)
1594{
1595 const struct cred *cred = current_cred(), *tcred;
1596 bool id_match;
1597
1598 if (current == task)
1599 return 0;
1600
1601 tcred = __task_cred(task);
1602 id_match = (uid_eq(cred->uid, tcred->euid) &&
1603 uid_eq(cred->uid, tcred->suid) &&
1604 uid_eq(cred->uid, tcred->uid) &&
1605 gid_eq(cred->gid, tcred->egid) &&
1606 gid_eq(cred->gid, tcred->sgid) &&
1607 gid_eq(cred->gid, tcred->gid));
1608 if (!id_match && !ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
1609 return -EPERM;
1610
1611 return security_task_prlimit(cred, tcred, flags);
1612}
1613
1614SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
1615 const struct rlimit64 __user *, new_rlim,
1616 struct rlimit64 __user *, old_rlim)
1617{
1618 struct rlimit64 old64, new64;
1619 struct rlimit old, new;
1620 struct task_struct *tsk;
1621 unsigned int checkflags = 0;
1622 int ret;
1623
1624 if (old_rlim)
1625 checkflags |= LSM_PRLIMIT_READ;
1626
1627 if (new_rlim) {
1628 if (copy_from_user(&new64, new_rlim, sizeof(new64)))
1629 return -EFAULT;
1630 rlim64_to_rlim(&new64, &new);
1631 checkflags |= LSM_PRLIMIT_WRITE;
1632 }
1633
1634 rcu_read_lock();
1635 tsk = pid ? find_task_by_vpid(pid) : current;
1636 if (!tsk) {
1637 rcu_read_unlock();
1638 return -ESRCH;
1639 }
1640 ret = check_prlimit_permission(tsk, checkflags);
1641 if (ret) {
1642 rcu_read_unlock();
1643 return ret;
1644 }
1645 get_task_struct(tsk);
1646 rcu_read_unlock();
1647
1648 ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
1649 old_rlim ? &old : NULL);
1650
1651 if (!ret && old_rlim) {
1652 rlim_to_rlim64(&old, &old64);
1653 if (copy_to_user(old_rlim, &old64, sizeof(old64)))
1654 ret = -EFAULT;
1655 }
1656
1657 put_task_struct(tsk);
1658 return ret;
1659}
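
/*
 * Example (illustrative, not part of this file): glibc exposes this syscall
 * as prlimit(), which can read and update a limit of another process in a
 * single call.  A sketch that raises RLIMIT_NOFILE of a target pid while
 * printing the old values; raise_nofile() is a hypothetical helper name.
 *
 *	#define _GNU_SOURCE
 *	#include <sys/resource.h>
 *	#include <stdio.h>
 *
 *	int raise_nofile(pid_t pid, rlim_t lim)
 *	{
 *		struct rlimit new_lim = { .rlim_cur = lim, .rlim_max = lim };
 *		struct rlimit old_lim;
 *
 *		if (prlimit(pid, RLIMIT_NOFILE, &new_lim, &old_lim) == -1) {
 *			perror("prlimit");
 *			return -1;
 *		}
 *		printf("old: cur=%llu max=%llu\n",
 *		       (unsigned long long)old_lim.rlim_cur,
 *		       (unsigned long long)old_lim.rlim_max);
 *		return 0;
 *	}
 */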
1660
1661SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1662{
1663 struct rlimit new_rlim;
1664
1665 if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
1666 return -EFAULT;
1667 return do_prlimit(current, resource, &new_rlim, NULL);
1668}
1669
1670
1671
1672
1673
1674
1675
1676
1677
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
1704{
1705 r->ru_nvcsw += t->nvcsw;
1706 r->ru_nivcsw += t->nivcsw;
1707 r->ru_minflt += t->min_flt;
1708 r->ru_majflt += t->maj_flt;
1709 r->ru_inblock += task_io_get_inblock(t);
1710 r->ru_oublock += task_io_get_oublock(t);
1711}
1712
1713void getrusage(struct task_struct *p, int who, struct rusage *r)
1714{
1715 struct task_struct *t;
1716 unsigned long flags;
1717 u64 tgutime, tgstime, utime, stime;
1718 unsigned long maxrss = 0;
1719
1720 memset((char *)r, 0, sizeof (*r));
1721 utime = stime = 0;
1722
1723 if (who == RUSAGE_THREAD) {
1724 task_cputime_adjusted(current, &utime, &stime);
1725 accumulate_thread_rusage(p, r);
1726 maxrss = p->signal->maxrss;
1727 goto out;
1728 }
1729
1730 if (!lock_task_sighand(p, &flags))
1731 return;
1732
1733 switch (who) {
1734 case RUSAGE_BOTH:
1735 case RUSAGE_CHILDREN:
1736 utime = p->signal->cutime;
1737 stime = p->signal->cstime;
1738 r->ru_nvcsw = p->signal->cnvcsw;
1739 r->ru_nivcsw = p->signal->cnivcsw;
1740 r->ru_minflt = p->signal->cmin_flt;
1741 r->ru_majflt = p->signal->cmaj_flt;
1742 r->ru_inblock = p->signal->cinblock;
1743 r->ru_oublock = p->signal->coublock;
1744 maxrss = p->signal->cmaxrss;
1745
1746 if (who == RUSAGE_CHILDREN)
1747 break;
1748
1749 case RUSAGE_SELF:
1750 thread_group_cputime_adjusted(p, &tgutime, &tgstime);
1751 utime += tgutime;
1752 stime += tgstime;
1753 r->ru_nvcsw += p->signal->nvcsw;
1754 r->ru_nivcsw += p->signal->nivcsw;
1755 r->ru_minflt += p->signal->min_flt;
1756 r->ru_majflt += p->signal->maj_flt;
1757 r->ru_inblock += p->signal->inblock;
1758 r->ru_oublock += p->signal->oublock;
1759 if (maxrss < p->signal->maxrss)
1760 maxrss = p->signal->maxrss;
1761 t = p;
1762 do {
1763 accumulate_thread_rusage(t, r);
1764 } while_each_thread(p, t);
1765 break;
1766
1767 default:
1768 BUG();
1769 }
1770 unlock_task_sighand(p, &flags);
1771
1772out:
1773 r->ru_utime = ns_to_timeval(utime);
1774 r->ru_stime = ns_to_timeval(stime);
1775
1776 if (who != RUSAGE_CHILDREN) {
1777 struct mm_struct *mm = get_task_mm(p);
1778
1779 if (mm) {
1780 setmax_mm_hiwater_rss(&maxrss, mm);
1781 mmput(mm);
1782 }
1783 }
1784 r->ru_maxrss = maxrss * (PAGE_SIZE / 1024);
1785}
1786
1787SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
1788{
1789 struct rusage r;
1790
1791 if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
1792 who != RUSAGE_THREAD)
1793 return -EINVAL;
1794
1795 getrusage(current, who, &r);
1796 return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
1797}
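
/*
 * Example (illustrative, not part of this file): reading the peak resident
 * set size of the calling process.  ru_maxrss is reported in kilobytes
 * (see the PAGE_SIZE / 1024 scaling in getrusage() above).  A sketch:
 *
 *	#include <sys/resource.h>
 *	#include <stdio.h>
 *
 *	void print_peak_rss(void)
 *	{
 *		struct rusage ru;
 *
 *		if (getrusage(RUSAGE_SELF, &ru) == 0)
 *			printf("peak RSS: %ld kB\n", ru.ru_maxrss);
 *	}
 */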
1798
1799#ifdef CONFIG_COMPAT
1800COMPAT_SYSCALL_DEFINE2(getrusage, int, who, struct compat_rusage __user *, ru)
1801{
1802 struct rusage r;
1803
1804 if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
1805 who != RUSAGE_THREAD)
1806 return -EINVAL;
1807
1808 getrusage(current, who, &r);
1809 return put_compat_rusage(&r, ru);
1810}
1811#endif
1812
1813SYSCALL_DEFINE1(umask, int, mask)
1814{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
1816 return mask;
1817}
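
/*
 * Example (illustrative, not part of this file): umask() cannot report the
 * current mask without changing it, so userspace reads it by setting a
 * scratch value and immediately restoring the previous one:
 *
 *	#include <sys/stat.h>
 *
 *	mode_t read_umask(void)
 *	{
 *		mode_t old = umask(0);	// returns the previous mask
 *
 *		umask(old);		// put it back
 *		return old;
 *	}
 */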
1818
1819static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
1820{
1821 struct fd exe;
1822 struct file *old_exe, *exe_file;
1823 struct inode *inode;
1824 int err;
1825
1826 exe = fdget(fd);
1827 if (!exe.file)
1828 return -EBADF;
1829
1830 inode = file_inode(exe.file);
1831
1832
1833
1834
1835
1836
1837 err = -EACCES;
1838 if (!S_ISREG(inode->i_mode) || path_noexec(&exe.file->f_path))
1839 goto exit;
1840
1841 err = inode_permission(inode, MAY_EXEC);
1842 if (err)
1843 goto exit;
1844
1845
1846
1847
1848 exe_file = get_mm_exe_file(mm);
1849 err = -EBUSY;
1850 if (exe_file) {
1851 struct vm_area_struct *vma;
1852
1853 down_read(&mm->mmap_sem);
1854 for (vma = mm->mmap; vma; vma = vma->vm_next) {
1855 if (!vma->vm_file)
1856 continue;
1857 if (path_equal(&vma->vm_file->f_path,
1858 &exe_file->f_path))
1859 goto exit_err;
1860 }
1861
1862 up_read(&mm->mmap_sem);
1863 fput(exe_file);
1864 }
1865
1866 err = 0;
1867
1868 get_file(exe.file);
1869 old_exe = xchg(&mm->exe_file, exe.file);
1870 if (old_exe)
1871 fput(old_exe);
1872exit:
1873 fdput(exe);
1874 return err;
1875exit_err:
1876 up_read(&mm->mmap_sem);
1877 fput(exe_file);
1878 goto exit;
1879}
1880
1881
1882
1883
1884
1885static int validate_prctl_map(struct prctl_mm_map *prctl_map)
1886{
1887 unsigned long mmap_max_addr = TASK_SIZE;
1888 struct mm_struct *mm = current->mm;
1889 int error = -EINVAL, i;
1890
1891 static const unsigned char offsets[] = {
1892 offsetof(struct prctl_mm_map, start_code),
1893 offsetof(struct prctl_mm_map, end_code),
1894 offsetof(struct prctl_mm_map, start_data),
1895 offsetof(struct prctl_mm_map, end_data),
1896 offsetof(struct prctl_mm_map, start_brk),
1897 offsetof(struct prctl_mm_map, brk),
1898 offsetof(struct prctl_mm_map, start_stack),
1899 offsetof(struct prctl_mm_map, arg_start),
1900 offsetof(struct prctl_mm_map, arg_end),
1901 offsetof(struct prctl_mm_map, env_start),
1902 offsetof(struct prctl_mm_map, env_end),
1903 };
1904
1905
1906
1907
1908
1909 for (i = 0; i < ARRAY_SIZE(offsets); i++) {
1910 u64 val = *(u64 *)((char *)prctl_map + offsets[i]);
1911
1912 if ((unsigned long)val >= mmap_max_addr ||
1913 (unsigned long)val < mmap_min_addr)
1914 goto out;
1915 }
1916
1917
1918
1919
1920#define __prctl_check_order(__m1, __op, __m2) \
1921 ((unsigned long)prctl_map->__m1 __op \
1922 (unsigned long)prctl_map->__m2) ? 0 : -EINVAL
1923 error = __prctl_check_order(start_code, <, end_code);
1924 error |= __prctl_check_order(start_data, <, end_data);
1925 error |= __prctl_check_order(start_brk, <=, brk);
1926 error |= __prctl_check_order(arg_start, <=, arg_end);
1927 error |= __prctl_check_order(env_start, <=, env_end);
1928 if (error)
1929 goto out;
1930#undef __prctl_check_order
1931
1932 error = -EINVAL;
1933
1934
1935
1936
1937 if (prctl_map->start_brk <= prctl_map->end_data ||
1938 prctl_map->brk <= prctl_map->end_data)
1939 goto out;
1940
1941
1942
1943
1944 if (check_data_rlimit(rlimit(RLIMIT_DATA), prctl_map->brk,
1945 prctl_map->start_brk, prctl_map->end_data,
1946 prctl_map->start_data))
1947 goto out;
1948
1949
1950
1951
1952 if (prctl_map->auxv_size) {
1953 if (!prctl_map->auxv || prctl_map->auxv_size > sizeof(mm->saved_auxv))
1954 goto out;
1955 }
1956
1957
1958
1959
1960
1961
1962 if (prctl_map->exe_fd != (u32)-1) {
1963 if (!ns_capable(current_user_ns(), CAP_SYS_ADMIN))
1964 goto out;
1965 }
1966
1967 error = 0;
1968out:
1969 return error;
1970}
1971
1972#ifdef CONFIG_CHECKPOINT_RESTORE
1973static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data_size)
1974{
1975 struct prctl_mm_map prctl_map = { .exe_fd = (u32)-1, };
1976 unsigned long user_auxv[AT_VECTOR_SIZE];
1977 struct mm_struct *mm = current->mm;
1978 int error;
1979
1980 BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
1981 BUILD_BUG_ON(sizeof(struct prctl_mm_map) > 256);
1982
1983 if (opt == PR_SET_MM_MAP_SIZE)
1984 return put_user((unsigned int)sizeof(prctl_map),
1985 (unsigned int __user *)addr);
1986
1987 if (data_size != sizeof(prctl_map))
1988 return -EINVAL;
1989
1990 if (copy_from_user(&prctl_map, addr, sizeof(prctl_map)))
1991 return -EFAULT;
1992
1993 error = validate_prctl_map(&prctl_map);
1994 if (error)
1995 return error;
1996
1997 if (prctl_map.auxv_size) {
1998 memset(user_auxv, 0, sizeof(user_auxv));
1999 if (copy_from_user(user_auxv,
2000 (const void __user *)prctl_map.auxv,
2001 prctl_map.auxv_size))
2002 return -EFAULT;
2003
2004
2005 user_auxv[AT_VECTOR_SIZE - 2] = AT_NULL;
2006 user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL;
2007 }
2008
2009 if (prctl_map.exe_fd != (u32)-1) {
2010 error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
2011 if (error)
2012 return error;
2013 }
2014
2015
2016
2017
2018
2019 down_read(&mm->mmap_sem);
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033 spin_lock(&mm->arg_lock);
2034 mm->start_code = prctl_map.start_code;
2035 mm->end_code = prctl_map.end_code;
2036 mm->start_data = prctl_map.start_data;
2037 mm->end_data = prctl_map.end_data;
2038 mm->start_brk = prctl_map.start_brk;
2039 mm->brk = prctl_map.brk;
2040 mm->start_stack = prctl_map.start_stack;
2041 mm->arg_start = prctl_map.arg_start;
2042 mm->arg_end = prctl_map.arg_end;
2043 mm->env_start = prctl_map.env_start;
2044 mm->env_end = prctl_map.env_end;
2045 spin_unlock(&mm->arg_lock);
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055 if (prctl_map.auxv_size)
2056 memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));
2057
2058 up_read(&mm->mmap_sem);
2059 return 0;
2060}
2061#endif
2062
2063static int prctl_set_auxv(struct mm_struct *mm, unsigned long addr,
2064 unsigned long len)
2065{
2066
2067
2068
2069
2070
2071
2072 unsigned long user_auxv[AT_VECTOR_SIZE];
2073
2074 if (len > sizeof(user_auxv))
2075 return -EINVAL;
2076
2077 if (copy_from_user(user_auxv, (const void __user *)addr, len))
2078 return -EFAULT;
2079
2080
2081 user_auxv[AT_VECTOR_SIZE - 2] = 0;
2082 user_auxv[AT_VECTOR_SIZE - 1] = 0;
2083
2084 BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
2085
2086 task_lock(current);
2087 memcpy(mm->saved_auxv, user_auxv, len);
2088 task_unlock(current);
2089
2090 return 0;
2091}
2092
2093static int prctl_set_mm(int opt, unsigned long addr,
2094 unsigned long arg4, unsigned long arg5)
2095{
2096 struct mm_struct *mm = current->mm;
2097 struct prctl_mm_map prctl_map;
2098 struct vm_area_struct *vma;
2099 int error;
2100
2101 if (arg5 || (arg4 && (opt != PR_SET_MM_AUXV &&
2102 opt != PR_SET_MM_MAP &&
2103 opt != PR_SET_MM_MAP_SIZE)))
2104 return -EINVAL;
2105
2106#ifdef CONFIG_CHECKPOINT_RESTORE
2107 if (opt == PR_SET_MM_MAP || opt == PR_SET_MM_MAP_SIZE)
2108 return prctl_set_mm_map(opt, (const void __user *)addr, arg4);
2109#endif
2110
2111 if (!capable(CAP_SYS_RESOURCE))
2112 return -EPERM;
2113
2114 if (opt == PR_SET_MM_EXE_FILE)
2115 return prctl_set_mm_exe_file(mm, (unsigned int)addr);
2116
2117 if (opt == PR_SET_MM_AUXV)
2118 return prctl_set_auxv(mm, addr, arg4);
2119
2120 if (addr >= TASK_SIZE || addr < mmap_min_addr)
2121 return -EINVAL;
2122
2123 error = -EINVAL;
2124
2125
2126
2127
2128
2129
2130 down_read(&mm->mmap_sem);
2131 vma = find_vma(mm, addr);
2132
2133 spin_lock(&mm->arg_lock);
2134 prctl_map.start_code = mm->start_code;
2135 prctl_map.end_code = mm->end_code;
2136 prctl_map.start_data = mm->start_data;
2137 prctl_map.end_data = mm->end_data;
2138 prctl_map.start_brk = mm->start_brk;
2139 prctl_map.brk = mm->brk;
2140 prctl_map.start_stack = mm->start_stack;
2141 prctl_map.arg_start = mm->arg_start;
2142 prctl_map.arg_end = mm->arg_end;
2143 prctl_map.env_start = mm->env_start;
2144 prctl_map.env_end = mm->env_end;
2145 prctl_map.auxv = NULL;
2146 prctl_map.auxv_size = 0;
2147 prctl_map.exe_fd = -1;
2148
2149 switch (opt) {
2150 case PR_SET_MM_START_CODE:
2151 prctl_map.start_code = addr;
2152 break;
2153 case PR_SET_MM_END_CODE:
2154 prctl_map.end_code = addr;
2155 break;
2156 case PR_SET_MM_START_DATA:
2157 prctl_map.start_data = addr;
2158 break;
2159 case PR_SET_MM_END_DATA:
2160 prctl_map.end_data = addr;
2161 break;
2162 case PR_SET_MM_START_STACK:
2163 prctl_map.start_stack = addr;
2164 break;
2165 case PR_SET_MM_START_BRK:
2166 prctl_map.start_brk = addr;
2167 break;
2168 case PR_SET_MM_BRK:
2169 prctl_map.brk = addr;
2170 break;
2171 case PR_SET_MM_ARG_START:
2172 prctl_map.arg_start = addr;
2173 break;
2174 case PR_SET_MM_ARG_END:
2175 prctl_map.arg_end = addr;
2176 break;
2177 case PR_SET_MM_ENV_START:
2178 prctl_map.env_start = addr;
2179 break;
2180 case PR_SET_MM_ENV_END:
2181 prctl_map.env_end = addr;
2182 break;
2183 default:
2184 goto out;
2185 }
2186
2187 error = validate_prctl_map(&prctl_map);
2188 if (error)
2189 goto out;
2190
2191 switch (opt) {
2192
2193
2194
2195
2196
2197
2198
2199 case PR_SET_MM_START_STACK:
2200 case PR_SET_MM_ARG_START:
2201 case PR_SET_MM_ARG_END:
2202 case PR_SET_MM_ENV_START:
2203 case PR_SET_MM_ENV_END:
2204 if (!vma) {
2205 error = -EFAULT;
2206 goto out;
2207 }
2208 }
2209
2210 mm->start_code = prctl_map.start_code;
2211 mm->end_code = prctl_map.end_code;
2212 mm->start_data = prctl_map.start_data;
2213 mm->end_data = prctl_map.end_data;
2214 mm->start_brk = prctl_map.start_brk;
2215 mm->brk = prctl_map.brk;
2216 mm->start_stack = prctl_map.start_stack;
2217 mm->arg_start = prctl_map.arg_start;
2218 mm->arg_end = prctl_map.arg_end;
2219 mm->env_start = prctl_map.env_start;
2220 mm->env_end = prctl_map.env_end;
2221
2222 error = 0;
2223out:
2224 spin_unlock(&mm->arg_lock);
2225 up_read(&mm->mmap_sem);
2226 return error;
2227}
2228
2229#ifdef CONFIG_CHECKPOINT_RESTORE
2230static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
2231{
2232 return put_user(me->clear_child_tid, tid_addr);
2233}
2234#else
2235static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
2236{
2237 return -EINVAL;
2238}
2239#endif
2240
2241static int propagate_has_child_subreaper(struct task_struct *p, void *data)
2242{
2243
2244
2245
2246
2247
2248
2249
2250
2251 if (p->signal->has_child_subreaper ||
2252 is_child_reaper(task_pid(p)))
2253 return 0;
2254
2255 p->signal->has_child_subreaper = 1;
2256 return 1;
2257}
2258
2259int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which)
2260{
2261 return -EINVAL;
2262}
2263
2264int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which,
2265 unsigned long ctrl)
2266{
2267 return -EINVAL;
2268}
2269
2270#define PR_IO_FLUSHER (PF_MEMALLOC_NOIO | PF_LOCAL_THROTTLE)
2271
2272SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
2273 unsigned long, arg4, unsigned long, arg5)
2274{
2275 struct task_struct *me = current;
2276 unsigned char comm[sizeof(me->comm)];
2277 long error;
2278
2279 error = security_task_prctl(option, arg2, arg3, arg4, arg5);
2280 if (error != -ENOSYS)
2281 return error;
2282
2283 error = 0;
2284 switch (option) {
2285 case PR_SET_PDEATHSIG:
2286 if (!valid_signal(arg2)) {
2287 error = -EINVAL;
2288 break;
2289 }
2290 me->pdeath_signal = arg2;
2291 break;
2292 case PR_GET_PDEATHSIG:
2293 error = put_user(me->pdeath_signal, (int __user *)arg2);
2294 break;
2295 case PR_GET_DUMPABLE:
2296 error = get_dumpable(me->mm);
2297 break;
2298 case PR_SET_DUMPABLE:
2299 if (arg2 != SUID_DUMP_DISABLE && arg2 != SUID_DUMP_USER) {
2300 error = -EINVAL;
2301 break;
2302 }
2303 set_dumpable(me->mm, arg2);
2304 break;
2305
2306 case PR_SET_UNALIGN:
2307 error = SET_UNALIGN_CTL(me, arg2);
2308 break;
2309 case PR_GET_UNALIGN:
2310 error = GET_UNALIGN_CTL(me, arg2);
2311 break;
2312 case PR_SET_FPEMU:
2313 error = SET_FPEMU_CTL(me, arg2);
2314 break;
2315 case PR_GET_FPEMU:
2316 error = GET_FPEMU_CTL(me, arg2);
2317 break;
2318 case PR_SET_FPEXC:
2319 error = SET_FPEXC_CTL(me, arg2);
2320 break;
2321 case PR_GET_FPEXC:
2322 error = GET_FPEXC_CTL(me, arg2);
2323 break;
2324 case PR_GET_TIMING:
2325 error = PR_TIMING_STATISTICAL;
2326 break;
2327 case PR_SET_TIMING:
2328 if (arg2 != PR_TIMING_STATISTICAL)
2329 error = -EINVAL;
2330 break;
2331 case PR_SET_NAME:
2332 comm[sizeof(me->comm) - 1] = 0;
2333 if (strncpy_from_user(comm, (char __user *)arg2,
2334 sizeof(me->comm) - 1) < 0)
2335 return -EFAULT;
2336 set_task_comm(me, comm);
2337 proc_comm_connector(me);
2338 break;
2339 case PR_GET_NAME:
2340 get_task_comm(comm, me);
2341 if (copy_to_user((char __user *)arg2, comm, sizeof(comm)))
2342 return -EFAULT;
2343 break;
2344 case PR_GET_ENDIAN:
2345 error = GET_ENDIAN(me, arg2);
2346 break;
2347 case PR_SET_ENDIAN:
2348 error = SET_ENDIAN(me, arg2);
2349 break;
2350 case PR_GET_SECCOMP:
2351 error = prctl_get_seccomp();
2352 break;
2353 case PR_SET_SECCOMP:
2354 error = prctl_set_seccomp(arg2, (char __user *)arg3);
2355 break;
2356 case PR_GET_TSC:
2357 error = GET_TSC_CTL(arg2);
2358 break;
2359 case PR_SET_TSC:
2360 error = SET_TSC_CTL(arg2);
2361 break;
2362 case PR_TASK_PERF_EVENTS_DISABLE:
2363 error = perf_event_task_disable();
2364 break;
2365 case PR_TASK_PERF_EVENTS_ENABLE:
2366 error = perf_event_task_enable();
2367 break;
2368 case PR_GET_TIMERSLACK:
2369 if (current->timer_slack_ns > ULONG_MAX)
2370 error = ULONG_MAX;
2371 else
2372 error = current->timer_slack_ns;
2373 break;
2374 case PR_SET_TIMERSLACK:
2375 if (arg2 <= 0)
2376 current->timer_slack_ns =
2377 current->default_timer_slack_ns;
2378 else
2379 current->timer_slack_ns = arg2;
2380 break;
2381 case PR_MCE_KILL:
2382 if (arg4 | arg5)
2383 return -EINVAL;
2384 switch (arg2) {
2385 case PR_MCE_KILL_CLEAR:
2386 if (arg3 != 0)
2387 return -EINVAL;
2388 current->flags &= ~PF_MCE_PROCESS;
2389 break;
2390 case PR_MCE_KILL_SET:
2391 current->flags |= PF_MCE_PROCESS;
2392 if (arg3 == PR_MCE_KILL_EARLY)
2393 current->flags |= PF_MCE_EARLY;
2394 else if (arg3 == PR_MCE_KILL_LATE)
2395 current->flags &= ~PF_MCE_EARLY;
2396 else if (arg3 == PR_MCE_KILL_DEFAULT)
2397 current->flags &=
2398 ~(PF_MCE_EARLY|PF_MCE_PROCESS);
2399 else
2400 return -EINVAL;
2401 break;
2402 default:
2403 return -EINVAL;
2404 }
2405 break;
2406 case PR_MCE_KILL_GET:
2407 if (arg2 | arg3 | arg4 | arg5)
2408 return -EINVAL;
2409 if (current->flags & PF_MCE_PROCESS)
2410 error = (current->flags & PF_MCE_EARLY) ?
2411 PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
2412 else
2413 error = PR_MCE_KILL_DEFAULT;
2414 break;
2415 case PR_SET_MM:
2416 error = prctl_set_mm(arg2, arg3, arg4, arg5);
2417 break;
2418 case PR_GET_TID_ADDRESS:
2419 error = prctl_get_tid_address(me, (int __user **)arg2);
2420 break;
2421 case PR_SET_CHILD_SUBREAPER:
2422 me->signal->is_child_subreaper = !!arg2;
2423 if (!arg2)
2424 break;
2425
2426 walk_process_tree(me, propagate_has_child_subreaper, NULL);
2427 break;
2428 case PR_GET_CHILD_SUBREAPER:
2429 error = put_user(me->signal->is_child_subreaper,
2430 (int __user *)arg2);
2431 break;
2432 case PR_SET_NO_NEW_PRIVS:
2433 if (arg2 != 1 || arg3 || arg4 || arg5)
2434 return -EINVAL;
2435
2436 task_set_no_new_privs(current);
2437 break;
2438 case PR_GET_NO_NEW_PRIVS:
2439 if (arg2 || arg3 || arg4 || arg5)
2440 return -EINVAL;
2441 return task_no_new_privs(current) ? 1 : 0;
2442 case PR_GET_THP_DISABLE:
2443 if (arg2 || arg3 || arg4 || arg5)
2444 return -EINVAL;
2445 error = !!test_bit(MMF_DISABLE_THP, &me->mm->flags);
2446 break;
2447 case PR_SET_THP_DISABLE:
2448 if (arg3 || arg4 || arg5)
2449 return -EINVAL;
2450 if (down_write_killable(&me->mm->mmap_sem))
2451 return -EINTR;
2452 if (arg2)
2453 set_bit(MMF_DISABLE_THP, &me->mm->flags);
2454 else
2455 clear_bit(MMF_DISABLE_THP, &me->mm->flags);
2456 up_write(&me->mm->mmap_sem);
2457 break;
2458 case PR_MPX_ENABLE_MANAGEMENT:
2459 if (arg2 || arg3 || arg4 || arg5)
2460 return -EINVAL;
2461 error = MPX_ENABLE_MANAGEMENT();
2462 break;
2463 case PR_MPX_DISABLE_MANAGEMENT:
2464 if (arg2 || arg3 || arg4 || arg5)
2465 return -EINVAL;
2466 error = MPX_DISABLE_MANAGEMENT();
2467 break;
2468 case PR_SET_FP_MODE:
2469 error = SET_FP_MODE(me, arg2);
2470 break;
2471 case PR_GET_FP_MODE:
2472 error = GET_FP_MODE(me);
2473 break;
2474 case PR_SVE_SET_VL:
2475 error = SVE_SET_VL(arg2);
2476 break;
2477 case PR_SVE_GET_VL:
2478 error = SVE_GET_VL();
2479 break;
2480 case PR_GET_SPECULATION_CTRL:
2481 if (arg3 || arg4 || arg5)
2482 return -EINVAL;
2483 error = arch_prctl_spec_ctrl_get(me, arg2);
2484 break;
2485 case PR_SET_SPECULATION_CTRL:
2486 if (arg4 || arg5)
2487 return -EINVAL;
2488 error = arch_prctl_spec_ctrl_set(me, arg2, arg3);
2489 break;
2490 case PR_PAC_RESET_KEYS:
2491 if (arg3 || arg4 || arg5)
2492 return -EINVAL;
2493 error = PAC_RESET_KEYS(me, arg2);
2494 break;
2495 case PR_SET_IO_FLUSHER:
2496 if (!capable(CAP_SYS_RESOURCE))
2497 return -EPERM;
2498
2499 if (arg3 || arg4 || arg5)
2500 return -EINVAL;
2501
2502 if (arg2 == 1)
2503 current->flags |= PR_IO_FLUSHER;
2504 else if (!arg2)
2505 current->flags &= ~PR_IO_FLUSHER;
2506 else
2507 return -EINVAL;
2508 break;
2509 case PR_GET_IO_FLUSHER:
2510 if (!capable(CAP_SYS_RESOURCE))
2511 return -EPERM;
2512
2513 if (arg2 || arg3 || arg4 || arg5)
2514 return -EINVAL;
2515
2516 error = (current->flags & PR_IO_FLUSHER) == PR_IO_FLUSHER;
2517 break;
2518 default:
2519 error = -EINVAL;
2520 break;
2521 }
2522 return error;
2523}
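
/*
 * Example (illustrative, not part of this file): two common prctl() uses -
 * naming the calling thread and requesting a signal when the parent dies.
 * A sketch; "my-worker" and setup_worker() are arbitrary placeholder names.
 *
 *	#include <sys/prctl.h>
 *	#include <signal.h>
 *	#include <string.h>
 *
 *	void setup_worker(void)
 *	{
 *		char name[16];	// PR_SET_NAME uses at most 16 bytes, NUL included
 *
 *		strncpy(name, "my-worker", sizeof(name));
 *		name[sizeof(name) - 1] = '\0';
 *		prctl(PR_SET_NAME, (unsigned long)name);
 *		// Deliver SIGTERM to this thread if its parent exits.
 *		prctl(PR_SET_PDEATHSIG, SIGTERM);
 *	}
 */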
2524
2525SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
2526 struct getcpu_cache __user *, unused)
2527{
2528 int err = 0;
2529 int cpu = raw_smp_processor_id();
2530
2531 if (cpup)
2532 err |= put_user(cpu, cpup);
2533 if (nodep)
2534 err |= put_user(cpu_to_node(cpu), nodep);
2535 return err ? -EFAULT : 0;
2536}
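
/*
 * Example (illustrative, not part of this file): glibc wraps the cpu half
 * of this syscall as sched_getcpu(), and the vDSO usually makes it cheap
 * enough for hot paths.  A sketch:
 *
 *	#define _GNU_SOURCE
 *	#include <sched.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		int cpu = sched_getcpu();
 *
 *		if (cpu == -1)
 *			perror("sched_getcpu");
 *		else
 *			printf("running on CPU %d\n", cpu);
 *		return 0;
 *	}
 */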
2537
/**
 * do_sysinfo - fill in sysinfo struct
 * @info: pointer to buffer to fill
 */
2542static int do_sysinfo(struct sysinfo *info)
2543{
2544 unsigned long mem_total, sav_total;
2545 unsigned int mem_unit, bitcount;
2546 struct timespec64 tp;
2547
2548 memset(info, 0, sizeof(struct sysinfo));
2549
2550 ktime_get_boottime_ts64(&tp);
2551 timens_add_boottime(&tp);
2552 info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
2553
2554 get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
2555
2556 info->procs = nr_threads;
2557
2558 si_meminfo(info);
2559 si_swapinfo(info);
2560
2561
2562
2563
2564
2565
2566
2567
2568
2569
2570 mem_total = info->totalram + info->totalswap;
2571 if (mem_total < info->totalram || mem_total < info->totalswap)
2572 goto out;
2573 bitcount = 0;
2574 mem_unit = info->mem_unit;
2575 while (mem_unit > 1) {
2576 bitcount++;
2577 mem_unit >>= 1;
2578 sav_total = mem_total;
2579 mem_total <<= 1;
2580 if (mem_total < sav_total)
2581 goto out;
2582 }
2583
2584
2585
2586
2587
2588
2589
2590
2591 info->mem_unit = 1;
2592 info->totalram <<= bitcount;
2593 info->freeram <<= bitcount;
2594 info->sharedram <<= bitcount;
2595 info->bufferram <<= bitcount;
2596 info->totalswap <<= bitcount;
2597 info->freeswap <<= bitcount;
2598 info->totalhigh <<= bitcount;
2599 info->freehigh <<= bitcount;
2600
2601out:
2602 return 0;
2603}
2604
2605SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
2606{
2607 struct sysinfo val;
2608
2609 do_sysinfo(&val);
2610
2611 if (copy_to_user(info, &val, sizeof(struct sysinfo)))
2612 return -EFAULT;
2613
2614 return 0;
2615}
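
/*
 * Example (illustrative, not part of this file): the memory fields of
 * struct sysinfo are expressed in units of mem_unit bytes, so userspace
 * must multiply before printing.  A sketch:
 *
 *	#include <sys/sysinfo.h>
 *	#include <stdio.h>
 *
 *	void print_mem(void)
 *	{
 *		struct sysinfo si;
 *
 *		if (sysinfo(&si) == -1)
 *			return;
 *		printf("total RAM: %llu MiB, uptime: %ld s\n",
 *		       (unsigned long long)si.totalram * si.mem_unit >> 20,
 *		       si.uptime);
 *	}
 */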
2616
2617#ifdef CONFIG_COMPAT
2618struct compat_sysinfo {
2619 s32 uptime;
2620 u32 loads[3];
2621 u32 totalram;
2622 u32 freeram;
2623 u32 sharedram;
2624 u32 bufferram;
2625 u32 totalswap;
2626 u32 freeswap;
2627 u16 procs;
2628 u16 pad;
2629 u32 totalhigh;
2630 u32 freehigh;
2631 u32 mem_unit;
2632 char _f[20-2*sizeof(u32)-sizeof(int)];
2633};
2634
2635COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info)
2636{
2637 struct sysinfo s;
2638
2639 do_sysinfo(&s);
2640
2641
2642
2643
2644 if (upper_32_bits(s.totalram) || upper_32_bits(s.totalswap)) {
2645 int bitcount = 0;
2646
2647 while (s.mem_unit < PAGE_SIZE) {
2648 s.mem_unit <<= 1;
2649 bitcount++;
2650 }
2651
2652 s.totalram >>= bitcount;
2653 s.freeram >>= bitcount;
2654 s.sharedram >>= bitcount;
2655 s.bufferram >>= bitcount;
2656 s.totalswap >>= bitcount;
2657 s.freeswap >>= bitcount;
2658 s.totalhigh >>= bitcount;
2659 s.freehigh >>= bitcount;
2660 }
2661
2662 if (!access_ok(info, sizeof(struct compat_sysinfo)) ||
2663 __put_user(s.uptime, &info->uptime) ||
2664 __put_user(s.loads[0], &info->loads[0]) ||
2665 __put_user(s.loads[1], &info->loads[1]) ||
2666 __put_user(s.loads[2], &info->loads[2]) ||
2667 __put_user(s.totalram, &info->totalram) ||
2668 __put_user(s.freeram, &info->freeram) ||
2669 __put_user(s.sharedram, &info->sharedram) ||
2670 __put_user(s.bufferram, &info->bufferram) ||
2671 __put_user(s.totalswap, &info->totalswap) ||
2672 __put_user(s.freeswap, &info->freeswap) ||
2673 __put_user(s.procs, &info->procs) ||
2674 __put_user(s.totalhigh, &info->totalhigh) ||
2675 __put_user(s.freehigh, &info->freehigh) ||
2676 __put_user(s.mem_unit, &info->mem_unit))
2677 return -EFAULT;
2678
2679 return 0;
2680}
2681#endif
2682