#include <linux/export.h>
#include <linux/mm.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/kmod.h>
#include <linux/perf_event.h>
#include <linux/resource.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/getcpu.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/seccomp.h>
#include <linux/cpu.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/fs_struct.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/gfp.h>
#include <linux/syscore_ops.h>
#include <linux/version.h>
#include <linux/ctype.h>

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>
#include <linux/user_namespace.h>
#include <linux/binfmts.h>

#include <linux/sched.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/stat.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/sched/cputime.h>
#include <linux/rcupdate.h>
#include <linux/uidgid.h>
#include <linux/cred.h>

#include <linux/nospec.h>

#include <linux/kmsg_dump.h>

#include <generated/utsrelease.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/unistd.h>

#include "uid16.h"

#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a, b)		(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a, b)		(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a, b)		(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a, b)		(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a, b)		(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a, b)		(-EINVAL)
#endif
#ifndef GET_ENDIAN
# define GET_ENDIAN(a, b)		(-EINVAL)
#endif
#ifndef SET_ENDIAN
# define SET_ENDIAN(a, b)		(-EINVAL)
#endif
#ifndef GET_TSC_CTL
# define GET_TSC_CTL(a)			(-EINVAL)
#endif
#ifndef SET_TSC_CTL
# define SET_TSC_CTL(a)			(-EINVAL)
#endif
#ifndef MPX_ENABLE_MANAGEMENT
# define MPX_ENABLE_MANAGEMENT()	(-EINVAL)
#endif
#ifndef MPX_DISABLE_MANAGEMENT
# define MPX_DISABLE_MANAGEMENT()	(-EINVAL)
#endif
#ifndef GET_FP_MODE
# define GET_FP_MODE(a)			(-EINVAL)
#endif
#ifndef SET_FP_MODE
# define SET_FP_MODE(a, b)		(-EINVAL)
#endif
#ifndef SVE_SET_VL
# define SVE_SET_VL(a)			(-EINVAL)
#endif
#ifndef SVE_GET_VL
# define SVE_GET_VL()			(-EINVAL)
#endif
#ifndef PAC_RESET_KEYS
# define PAC_RESET_KEYS(a, b)		(-EINVAL)
#endif
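
/*
 * System-wide overflow uid and gid, reported to user space whenever a
 * kernel uid/gid cannot be represented in a legacy 16-bit interface.
 */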
int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);
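
/*
 * The same, but for filesystems that cannot represent the full uid/gid
 * range.
 */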
int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);
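
/*
 * Returns true if the current task may renice @p: the target's real or
 * effective uid must match our effective uid, or we must have
 * CAP_SYS_NICE in the target's user namespace.
 */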
156static bool set_one_prio_perm(struct task_struct *p)
157{
158 const struct cred *cred = current_cred(), *pcred = __task_cred(p);
159
160 if (uid_eq(pcred->uid, cred->euid) ||
161 uid_eq(pcred->euid, cred->euid))
162 return true;
163 if (ns_capable(pcred->user_ns, CAP_SYS_NICE))
164 return true;
165 return false;
166}
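
/*
 * Apply @niceval to a single task after the permission, nice-limit and
 * security checks.  On success the running @error is cleared from the
 * initial -ESRCH to 0, so the setpriority() loops report success once at
 * least one task has been reniced.
 */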
172static int set_one_prio(struct task_struct *p, int niceval, int error)
173{
174 int no_nice;
175
176 if (!set_one_prio_perm(p)) {
177 error = -EPERM;
178 goto out;
179 }
180 if (niceval < task_nice(p) && !can_nice(p, niceval)) {
181 error = -EACCES;
182 goto out;
183 }
184 no_nice = security_task_setnice(p, niceval);
185 if (no_nice) {
186 error = no_nice;
187 goto out;
188 }
189 if (error == -ESRCH)
190 error = 0;
191 set_user_nice(p, niceval);
192out:
193 return error;
194}
195
196SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
197{
198 struct task_struct *g, *p;
199 struct user_struct *user;
200 const struct cred *cred = current_cred();
201 int error = -EINVAL;
202 struct pid *pgrp;
203 kuid_t uid;
204
205 if (which > PRIO_USER || which < PRIO_PROCESS)
206 goto out;
207
208
209 error = -ESRCH;
210 if (niceval < MIN_NICE)
211 niceval = MIN_NICE;
212 if (niceval > MAX_NICE)
213 niceval = MAX_NICE;
214
215 rcu_read_lock();
216 read_lock(&tasklist_lock);
217 switch (which) {
218 case PRIO_PROCESS:
219 if (who)
220 p = find_task_by_vpid(who);
221 else
222 p = current;
223 if (p)
224 error = set_one_prio(p, niceval, error);
225 break;
226 case PRIO_PGRP:
227 if (who)
228 pgrp = find_vpid(who);
229 else
230 pgrp = task_pgrp(current);
231 do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
232 error = set_one_prio(p, niceval, error);
233 } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
234 break;
235 case PRIO_USER:
236 uid = make_kuid(cred->user_ns, who);
237 user = cred->user;
238 if (!who)
239 uid = cred->uid;
240 else if (!uid_eq(uid, cred->uid)) {
241 user = find_user(uid);
242 if (!user)
243 goto out_unlock;
244 }
245 do_each_thread(g, p) {
246 if (uid_eq(task_uid(p), uid) && task_pid_vnr(p))
247 error = set_one_prio(p, niceval, error);
248 } while_each_thread(g, p);
249 if (!uid_eq(uid, cred->uid))
250 free_uid(user);
251 break;
252 }
253out_unlock:
254 read_unlock(&tasklist_lock);
255 rcu_read_unlock();
256out:
257 return error;
258}
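
/*
 * To keep negative values free for error returns, getpriority() does not
 * return the raw nice value but nice_to_rlimit(nice), i.e. 40..1 instead
 * of -20..19, and reports the highest such value among the tasks that
 * matched the query.
 */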
266SYSCALL_DEFINE2(getpriority, int, which, int, who)
267{
268 struct task_struct *g, *p;
269 struct user_struct *user;
270 const struct cred *cred = current_cred();
271 long niceval, retval = -ESRCH;
272 struct pid *pgrp;
273 kuid_t uid;
274
275 if (which > PRIO_USER || which < PRIO_PROCESS)
276 return -EINVAL;
277
278 rcu_read_lock();
279 read_lock(&tasklist_lock);
280 switch (which) {
281 case PRIO_PROCESS:
282 if (who)
283 p = find_task_by_vpid(who);
284 else
285 p = current;
286 if (p) {
287 niceval = nice_to_rlimit(task_nice(p));
288 if (niceval > retval)
289 retval = niceval;
290 }
291 break;
292 case PRIO_PGRP:
293 if (who)
294 pgrp = find_vpid(who);
295 else
296 pgrp = task_pgrp(current);
297 do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
298 niceval = nice_to_rlimit(task_nice(p));
299 if (niceval > retval)
300 retval = niceval;
301 } while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
302 break;
303 case PRIO_USER:
304 uid = make_kuid(cred->user_ns, who);
305 user = cred->user;
306 if (!who)
307 uid = cred->uid;
308 else if (!uid_eq(uid, cred->uid)) {
309 user = find_user(uid);
310 if (!user)
311 goto out_unlock;
312 }
313 do_each_thread(g, p) {
314 if (uid_eq(task_uid(p), uid) && task_pid_vnr(p)) {
315 niceval = nice_to_rlimit(task_nice(p));
316 if (niceval > retval)
317 retval = niceval;
318 }
319 } while_each_thread(g, p);
320 if (!uid_eq(uid, cred->uid))
321 free_uid(user);
322 break;
323 }
324out_unlock:
325 read_unlock(&tasklist_lock);
326 rcu_read_unlock();
327
328 return retval;
329}
349#ifdef CONFIG_MULTIUSER
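/*
 * setregid() semantics (SVr4-style saved IDs): an unprivileged caller may
 * set the real gid to its current real or effective gid, and the
 * effective gid to its real, effective or saved gid; CAP_SETGID in the
 * caller's user namespace lifts these checks.  The saved gid follows the
 * new effective gid whenever the real gid changes or the effective gid is
 * set to something other than the old real gid, and fsgid follows egid.
 */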
350long __sys_setregid(gid_t rgid, gid_t egid)
351{
352 struct user_namespace *ns = current_user_ns();
353 const struct cred *old;
354 struct cred *new;
355 int retval;
356 kgid_t krgid, kegid;
357
358 krgid = make_kgid(ns, rgid);
359 kegid = make_kgid(ns, egid);
360
361 if ((rgid != (gid_t) -1) && !gid_valid(krgid))
362 return -EINVAL;
363 if ((egid != (gid_t) -1) && !gid_valid(kegid))
364 return -EINVAL;
365
366 new = prepare_creds();
367 if (!new)
368 return -ENOMEM;
369 old = current_cred();
370
371 retval = -EPERM;
372 if (rgid != (gid_t) -1) {
373 if (gid_eq(old->gid, krgid) ||
374 gid_eq(old->egid, krgid) ||
375 ns_capable(old->user_ns, CAP_SETGID))
376 new->gid = krgid;
377 else
378 goto error;
379 }
380 if (egid != (gid_t) -1) {
381 if (gid_eq(old->gid, kegid) ||
382 gid_eq(old->egid, kegid) ||
383 gid_eq(old->sgid, kegid) ||
384 ns_capable(old->user_ns, CAP_SETGID))
385 new->egid = kegid;
386 else
387 goto error;
388 }
389
390 if (rgid != (gid_t) -1 ||
391 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
392 new->sgid = new->egid;
393 new->fsgid = new->egid;
394
395 return commit_creds(new);
396
397error:
398 abort_creds(new);
399 return retval;
400}
401
402SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
403{
404 return __sys_setregid(rgid, egid);
405}
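
/*
 * setgid() with CAP_SETGID sets the real, effective, saved and filesystem
 * gids.  Without it, only the effective and filesystem gids change, and
 * only to the caller's current real or saved gid.
 */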
412long __sys_setgid(gid_t gid)
413{
414 struct user_namespace *ns = current_user_ns();
415 const struct cred *old;
416 struct cred *new;
417 int retval;
418 kgid_t kgid;
419
420 kgid = make_kgid(ns, gid);
421 if (!gid_valid(kgid))
422 return -EINVAL;
423
424 new = prepare_creds();
425 if (!new)
426 return -ENOMEM;
427 old = current_cred();
428
429 retval = -EPERM;
430 if (ns_capable(old->user_ns, CAP_SETGID))
431 new->gid = new->egid = new->sgid = new->fsgid = kgid;
432 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
433 new->egid = new->fsgid = kgid;
434 else
435 goto error;
436
437 return commit_creds(new);
438
439error:
440 abort_creds(new);
441 return retval;
442}
443
444SYSCALL_DEFINE1(setgid, gid_t, gid)
445{
446 return __sys_setgid(gid);
447}
448
449
450
451
452static int set_user(struct cred *new)
453{
454 struct user_struct *new_user;
455
456 new_user = alloc_uid(new->uid);
457 if (!new_user)
458 return -EAGAIN;
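
	/*
	 * Don't fail here when RLIMIT_NPROC is exceeded: too many programs
	 * never check the set*uid() return value.  Setting
	 * PF_NPROC_EXCEEDED instead defers the failure to a later execve(),
	 * which still enforces the limit for the common
	 * set*uid() + execve() pattern.
	 */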
467 if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
468 new_user != INIT_USER)
469 current->flags |= PF_NPROC_EXCEEDED;
470 else
471 current->flags &= ~PF_NPROC_EXCEEDED;
472
473 free_uid(new->user);
474 new->user = new_user;
475 return 0;
476}
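
/*
 * setreuid() mirrors setregid(): an unprivileged caller may set the real
 * uid to its current real or effective uid, and the effective uid to its
 * real, effective or saved uid; CAP_SETUID lifts the checks.  The saved
 * uid follows the new effective uid when the real uid changes or the
 * effective uid is set to something other than the old real uid, fsuid
 * follows euid, and set_user() switches the per-user accounting when the
 * real uid changes.
 */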
493long __sys_setreuid(uid_t ruid, uid_t euid)
494{
495 struct user_namespace *ns = current_user_ns();
496 const struct cred *old;
497 struct cred *new;
498 int retval;
499 kuid_t kruid, keuid;
500
501 kruid = make_kuid(ns, ruid);
502 keuid = make_kuid(ns, euid);
503
504 if ((ruid != (uid_t) -1) && !uid_valid(kruid))
505 return -EINVAL;
506 if ((euid != (uid_t) -1) && !uid_valid(keuid))
507 return -EINVAL;
508
509 new = prepare_creds();
510 if (!new)
511 return -ENOMEM;
512 old = current_cred();
513
514 retval = -EPERM;
515 if (ruid != (uid_t) -1) {
516 new->uid = kruid;
517 if (!uid_eq(old->uid, kruid) &&
518 !uid_eq(old->euid, kruid) &&
519 !ns_capable(old->user_ns, CAP_SETUID))
520 goto error;
521 }
522
523 if (euid != (uid_t) -1) {
524 new->euid = keuid;
525 if (!uid_eq(old->uid, keuid) &&
526 !uid_eq(old->euid, keuid) &&
527 !uid_eq(old->suid, keuid) &&
528 !ns_capable(old->user_ns, CAP_SETUID))
529 goto error;
530 }
531
532 if (!uid_eq(new->uid, old->uid)) {
533 retval = set_user(new);
534 if (retval < 0)
535 goto error;
536 }
537 if (ruid != (uid_t) -1 ||
538 (euid != (uid_t) -1 && !uid_eq(keuid, old->uid)))
539 new->suid = new->euid;
540 new->fsuid = new->euid;
541
542 retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
543 if (retval < 0)
544 goto error;
545
546 return commit_creds(new);
547
548error:
549 abort_creds(new);
550 return retval;
551}
552
553SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
554{
555 return __sys_setreuid(ruid, euid);
556}
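
/*
 * setuid() with CAP_SETUID sets the real, saved, effective and filesystem
 * uids (SVr4 SAVED_IDS behaviour).  Without the capability only the
 * effective and filesystem uids change, and only to the caller's real or
 * saved uid.
 */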
569long __sys_setuid(uid_t uid)
570{
571 struct user_namespace *ns = current_user_ns();
572 const struct cred *old;
573 struct cred *new;
574 int retval;
575 kuid_t kuid;
576
577 kuid = make_kuid(ns, uid);
578 if (!uid_valid(kuid))
579 return -EINVAL;
580
581 new = prepare_creds();
582 if (!new)
583 return -ENOMEM;
584 old = current_cred();
585
586 retval = -EPERM;
587 if (ns_capable(old->user_ns, CAP_SETUID)) {
588 new->suid = new->uid = kuid;
589 if (!uid_eq(kuid, old->uid)) {
590 retval = set_user(new);
591 if (retval < 0)
592 goto error;
593 }
594 } else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
595 goto error;
596 }
597
598 new->fsuid = new->euid = kuid;
599
600 retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
601 if (retval < 0)
602 goto error;
603
604 return commit_creds(new);
605
606error:
607 abort_creds(new);
608 return retval;
609}
610
611SYSCALL_DEFINE1(setuid, uid_t, uid)
612{
613 return __sys_setuid(uid);
614}
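
/*
 * setresuid() sets the real, effective and saved uids independently; -1
 * leaves a value unchanged.  Without CAP_SETUID each new value must
 * already be one of the caller's real, effective or saved uids.  The
 * filesystem uid always follows the effective uid.
 */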
621long __sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
622{
623 struct user_namespace *ns = current_user_ns();
624 const struct cred *old;
625 struct cred *new;
626 int retval;
627 kuid_t kruid, keuid, ksuid;
628
629 kruid = make_kuid(ns, ruid);
630 keuid = make_kuid(ns, euid);
631 ksuid = make_kuid(ns, suid);
632
633 if ((ruid != (uid_t) -1) && !uid_valid(kruid))
634 return -EINVAL;
635
636 if ((euid != (uid_t) -1) && !uid_valid(keuid))
637 return -EINVAL;
638
639 if ((suid != (uid_t) -1) && !uid_valid(ksuid))
640 return -EINVAL;
641
642 new = prepare_creds();
643 if (!new)
644 return -ENOMEM;
645
646 old = current_cred();
647
648 retval = -EPERM;
649 if (!ns_capable(old->user_ns, CAP_SETUID)) {
650 if (ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) &&
651 !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
652 goto error;
653 if (euid != (uid_t) -1 && !uid_eq(keuid, old->uid) &&
654 !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
655 goto error;
656 if (suid != (uid_t) -1 && !uid_eq(ksuid, old->uid) &&
657 !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
658 goto error;
659 }
660
661 if (ruid != (uid_t) -1) {
662 new->uid = kruid;
663 if (!uid_eq(kruid, old->uid)) {
664 retval = set_user(new);
665 if (retval < 0)
666 goto error;
667 }
668 }
669 if (euid != (uid_t) -1)
670 new->euid = keuid;
671 if (suid != (uid_t) -1)
672 new->suid = ksuid;
673 new->fsuid = new->euid;
674
675 retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
676 if (retval < 0)
677 goto error;
678
679 return commit_creds(new);
680
681error:
682 abort_creds(new);
683 return retval;
684}
685
686SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
687{
688 return __sys_setresuid(ruid, euid, suid);
689}
690
691SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp)
692{
693 const struct cred *cred = current_cred();
694 int retval;
695 uid_t ruid, euid, suid;
696
697 ruid = from_kuid_munged(cred->user_ns, cred->uid);
698 euid = from_kuid_munged(cred->user_ns, cred->euid);
699 suid = from_kuid_munged(cred->user_ns, cred->suid);
700
701 retval = put_user(ruid, ruidp);
702 if (!retval) {
703 retval = put_user(euid, euidp);
704 if (!retval)
705 return put_user(suid, suidp);
706 }
707 return retval;
708}
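
/*
 * Same as setresuid(), but for the real, effective and saved gids.
 */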
713long __sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
714{
715 struct user_namespace *ns = current_user_ns();
716 const struct cred *old;
717 struct cred *new;
718 int retval;
719 kgid_t krgid, kegid, ksgid;
720
721 krgid = make_kgid(ns, rgid);
722 kegid = make_kgid(ns, egid);
723 ksgid = make_kgid(ns, sgid);
724
725 if ((rgid != (gid_t) -1) && !gid_valid(krgid))
726 return -EINVAL;
727 if ((egid != (gid_t) -1) && !gid_valid(kegid))
728 return -EINVAL;
729 if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
730 return -EINVAL;
731
732 new = prepare_creds();
733 if (!new)
734 return -ENOMEM;
735 old = current_cred();
736
737 retval = -EPERM;
738 if (!ns_capable(old->user_ns, CAP_SETGID)) {
739 if (rgid != (gid_t) -1 && !gid_eq(krgid, old->gid) &&
740 !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
741 goto error;
742 if (egid != (gid_t) -1 && !gid_eq(kegid, old->gid) &&
743 !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
744 goto error;
745 if (sgid != (gid_t) -1 && !gid_eq(ksgid, old->gid) &&
746 !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
747 goto error;
748 }
749
750 if (rgid != (gid_t) -1)
751 new->gid = krgid;
752 if (egid != (gid_t) -1)
753 new->egid = kegid;
754 if (sgid != (gid_t) -1)
755 new->sgid = ksgid;
756 new->fsgid = new->egid;
757
758 return commit_creds(new);
759
760error:
761 abort_creds(new);
762 return retval;
763}
764
765SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
766{
767 return __sys_setresgid(rgid, egid, sgid);
768}
769
770SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp)
771{
772 const struct cred *cred = current_cred();
773 int retval;
774 gid_t rgid, egid, sgid;
775
776 rgid = from_kgid_munged(cred->user_ns, cred->gid);
777 egid = from_kgid_munged(cred->user_ns, cred->egid);
778 sgid = from_kgid_munged(cred->user_ns, cred->sgid);
779
780 retval = put_user(rgid, rgidp);
781 if (!retval) {
782 retval = put_user(egid, egidp);
783 if (!retval)
784 retval = put_user(sgid, sgidp);
785 }
786
787 return retval;
788}
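
/*
 * setfsuid() changes only the uid used for filesystem permission checks,
 * which is mainly of use to user-space file servers.  It always returns
 * the previous fsuid, whether or not the change was allowed, so callers
 * must compare the return value to detect failure.
 */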
797long __sys_setfsuid(uid_t uid)
798{
799 const struct cred *old;
800 struct cred *new;
801 uid_t old_fsuid;
802 kuid_t kuid;
803
804 old = current_cred();
805 old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);
806
807 kuid = make_kuid(old->user_ns, uid);
808 if (!uid_valid(kuid))
809 return old_fsuid;
810
811 new = prepare_creds();
812 if (!new)
813 return old_fsuid;
814
815 if (uid_eq(kuid, old->uid) || uid_eq(kuid, old->euid) ||
816 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
817 ns_capable(old->user_ns, CAP_SETUID)) {
818 if (!uid_eq(kuid, old->fsuid)) {
819 new->fsuid = kuid;
820 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
821 goto change_okay;
822 }
823 }
824
825 abort_creds(new);
826 return old_fsuid;
827
828change_okay:
829 commit_creds(new);
830 return old_fsuid;
831}
832
833SYSCALL_DEFINE1(setfsuid, uid_t, uid)
834{
835 return __sys_setfsuid(uid);
836}
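
/*
 * Same as setfsuid(), but for the filesystem gid.
 */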
841long __sys_setfsgid(gid_t gid)
842{
843 const struct cred *old;
844 struct cred *new;
845 gid_t old_fsgid;
846 kgid_t kgid;
847
848 old = current_cred();
849 old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);
850
851 kgid = make_kgid(old->user_ns, gid);
852 if (!gid_valid(kgid))
853 return old_fsgid;
854
855 new = prepare_creds();
856 if (!new)
857 return old_fsgid;
858
859 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
860 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
861 ns_capable(old->user_ns, CAP_SETGID)) {
862 if (!gid_eq(kgid, old->fsgid)) {
863 new->fsgid = kgid;
864 goto change_okay;
865 }
866 }
867
868 abort_creds(new);
869 return old_fsgid;
870
871change_okay:
872 commit_creds(new);
873 return old_fsgid;
874}
875
876SYSCALL_DEFINE1(setfsgid, gid_t, gid)
877{
878 return __sys_setfsgid(gid);
879}
880#endif
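
/*
 * sys_getpid() returns the caller's thread group id as seen from its pid
 * namespace; sys_gettid() below returns the per-thread id instead.
 */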
891SYSCALL_DEFINE0(getpid)
892{
893 return task_tgid_vnr(current);
894}
895
896
897SYSCALL_DEFINE0(gettid)
898{
899 return task_pid_vnr(current);
900}
901
902
903
904
905
906
907
908SYSCALL_DEFINE0(getppid)
909{
910 int pid;
911
912 rcu_read_lock();
913 pid = task_tgid_vnr(rcu_dereference(current->real_parent));
914 rcu_read_unlock();
915
916 return pid;
917}
918
919SYSCALL_DEFINE0(getuid)
920{
921
922 return from_kuid_munged(current_user_ns(), current_uid());
923}
924
925SYSCALL_DEFINE0(geteuid)
926{
927
928 return from_kuid_munged(current_user_ns(), current_euid());
929}
930
931SYSCALL_DEFINE0(getgid)
932{
933
934 return from_kgid_munged(current_user_ns(), current_gid());
935}
936
937SYSCALL_DEFINE0(getegid)
938{
939
940 return from_kgid_munged(current_user_ns(), current_egid());
941}
942
943static void do_sys_times(struct tms *tms)
944{
945 u64 tgutime, tgstime, cutime, cstime;
946
947 thread_group_cputime_adjusted(current, &tgutime, &tgstime);
948 cutime = current->signal->cutime;
949 cstime = current->signal->cstime;
950 tms->tms_utime = nsec_to_clock_t(tgutime);
951 tms->tms_stime = nsec_to_clock_t(tgstime);
952 tms->tms_cutime = nsec_to_clock_t(cutime);
953 tms->tms_cstime = nsec_to_clock_t(cstime);
954}
955
956SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
957{
958 if (tbuf) {
959 struct tms tmp;
960
961 do_sys_times(&tmp);
962 if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
963 return -EFAULT;
964 }
965 force_successful_syscall_return();
966 return (long) jiffies_64_to_clock_t(get_jiffies_64());
967}
968
969#ifdef CONFIG_COMPAT
970static compat_clock_t clock_t_to_compat_clock_t(clock_t x)
971{
972 return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
973}
974
975COMPAT_SYSCALL_DEFINE1(times, struct compat_tms __user *, tbuf)
976{
977 if (tbuf) {
978 struct tms tms;
979 struct compat_tms tmp;
980
981 do_sys_times(&tms);
982
983 tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime);
984 tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime);
985 tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime);
986 tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime);
987 if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
988 return -EFAULT;
989 }
990 force_successful_syscall_return();
991 return compat_jiffies_to_clock_t(jiffies);
992}
993#endif
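
/*
 * setpgid() moves the thread group led by @pid (default: the caller)
 * into process group @pgid (default: @pid itself).  The target must be
 * the caller or one of its children that has not execed yet
 * (PF_FORKNOEXEC), must be in the caller's session and must not be a
 * session leader; an existing destination group must belong to the same
 * session.
 */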
1006SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
1007{
1008 struct task_struct *p;
1009 struct task_struct *group_leader = current->group_leader;
1010 struct pid *pgrp;
1011 int err;
1012
1013 if (!pid)
1014 pid = task_pid_vnr(group_leader);
1015 if (!pgid)
1016 pgid = pid;
1017 if (pgid < 0)
1018 return -EINVAL;
1019 rcu_read_lock();
1020
1021
1022
1023
1024 write_lock_irq(&tasklist_lock);
1025
1026 err = -ESRCH;
1027 p = find_task_by_vpid(pid);
1028 if (!p)
1029 goto out;
1030
1031 err = -EINVAL;
1032 if (!thread_group_leader(p))
1033 goto out;
1034
1035 if (same_thread_group(p->real_parent, group_leader)) {
1036 err = -EPERM;
1037 if (task_session(p) != task_session(group_leader))
1038 goto out;
1039 err = -EACCES;
1040 if (!(p->flags & PF_FORKNOEXEC))
1041 goto out;
1042 } else {
1043 err = -ESRCH;
1044 if (p != group_leader)
1045 goto out;
1046 }
1047
1048 err = -EPERM;
1049 if (p->signal->leader)
1050 goto out;
1051
1052 pgrp = task_pid(p);
1053 if (pgid != pid) {
1054 struct task_struct *g;
1055
1056 pgrp = find_vpid(pgid);
1057 g = pid_task(pgrp, PIDTYPE_PGID);
1058 if (!g || task_session(g) != task_session(group_leader))
1059 goto out;
1060 }
1061
1062 err = security_task_setpgid(p, pgid);
1063 if (err)
1064 goto out;
1065
1066 if (task_pgrp(p) != pgrp)
1067 change_pid(p, PIDTYPE_PGID, pgrp);
1068
1069 err = 0;
1070out:
1071
1072 write_unlock_irq(&tasklist_lock);
1073 rcu_read_unlock();
1074 return err;
1075}
1076
1077static int do_getpgid(pid_t pid)
1078{
1079 struct task_struct *p;
1080 struct pid *grp;
1081 int retval;
1082
1083 rcu_read_lock();
1084 if (!pid)
1085 grp = task_pgrp(current);
1086 else {
1087 retval = -ESRCH;
1088 p = find_task_by_vpid(pid);
1089 if (!p)
1090 goto out;
1091 grp = task_pgrp(p);
1092 if (!grp)
1093 goto out;
1094
1095 retval = security_task_getpgid(p);
1096 if (retval)
1097 goto out;
1098 }
1099 retval = pid_vnr(grp);
1100out:
1101 rcu_read_unlock();
1102 return retval;
1103}
1104
1105SYSCALL_DEFINE1(getpgid, pid_t, pid)
1106{
1107 return do_getpgid(pid);
1108}
1109
1110#ifdef __ARCH_WANT_SYS_GETPGRP
1111
1112SYSCALL_DEFINE0(getpgrp)
1113{
1114 return do_getpgid(0);
1115}
1116
1117#endif
1118
1119SYSCALL_DEFINE1(getsid, pid_t, pid)
1120{
1121 struct task_struct *p;
1122 struct pid *sid;
1123 int retval;
1124
1125 rcu_read_lock();
1126 if (!pid)
1127 sid = task_session(current);
1128 else {
1129 retval = -ESRCH;
1130 p = find_task_by_vpid(pid);
1131 if (!p)
1132 goto out;
1133 sid = task_session(p);
1134 if (!sid)
1135 goto out;
1136
1137 retval = security_task_getsid(p);
1138 if (retval)
1139 goto out;
1140 }
1141 retval = pid_vnr(sid);
1142out:
1143 rcu_read_unlock();
1144 return retval;
1145}
1146
1147static void set_special_pids(struct pid *pid)
1148{
1149 struct task_struct *curr = current->group_leader;
1150
1151 if (task_session(curr) != pid)
1152 change_pid(curr, PIDTYPE_SID, pid);
1153
1154 if (task_pgrp(curr) != pid)
1155 change_pid(curr, PIDTYPE_PGID, pid);
1156}
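
/*
 * setsid() makes the calling thread group the leader of a new session
 * and of a new process group, both numbered by its own pid, and drops
 * any controlling tty.  It fails with -EPERM if the caller already leads
 * a session or if a process group with that pid already exists.
 */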
1158int ksys_setsid(void)
1159{
1160 struct task_struct *group_leader = current->group_leader;
1161 struct pid *sid = task_pid(group_leader);
1162 pid_t session = pid_vnr(sid);
1163 int err = -EPERM;
1164
1165 write_lock_irq(&tasklist_lock);
1166
1167 if (group_leader->signal->leader)
1168 goto out;
1169
1170
1171
1172
1173 if (pid_task(sid, PIDTYPE_PGID))
1174 goto out;
1175
1176 group_leader->signal->leader = 1;
1177 set_special_pids(sid);
1178
1179 proc_clear_tty(group_leader);
1180
1181 err = session;
1182out:
1183 write_unlock_irq(&tasklist_lock);
1184 if (err > 0) {
1185 proc_sid_connector(group_leader);
1186 sched_autogroup_create_attach(group_leader);
1187 }
1188 return err;
1189}
1190
1191SYSCALL_DEFINE0(setsid)
1192{
1193 return ksys_setsid();
1194}
1195
1196DECLARE_RWSEM(uts_sem);
1197
1198#ifdef COMPAT_UTS_MACHINE
1199#define override_architecture(name) \
1200 (personality(current->personality) == PER_LINUX32 && \
1201 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
1202 sizeof(COMPAT_UTS_MACHINE)))
1203#else
1204#define override_architecture(name) 0
1205#endif
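
/*
 * Work around old user space that cannot parse a "3.x" or later release
 * string: tasks running with the UNAME26 personality are shown a fake
 * "2.6.<60 + minor>" version, with the tail of the real release string
 * appended after the numeric part.
 */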
1213static int override_release(char __user *release, size_t len)
1214{
1215 int ret = 0;
1216
1217 if (current->personality & UNAME26) {
1218 const char *rest = UTS_RELEASE;
1219 char buf[65] = { 0 };
1220 int ndots = 0;
1221 unsigned v;
1222 size_t copy;
1223
1224 while (*rest) {
1225 if (*rest == '.' && ++ndots >= 3)
1226 break;
1227 if (!isdigit(*rest) && *rest != '.')
1228 break;
1229 rest++;
1230 }
1231 v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 60;
1232 copy = clamp_t(size_t, len, 1, sizeof(buf));
1233 copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
1234 ret = copy_to_user(release, buf, copy + 1);
1235 }
1236 return ret;
1237}
1238
1239SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
1240{
1241 struct new_utsname tmp;
1242
1243 down_read(&uts_sem);
1244 memcpy(&tmp, utsname(), sizeof(tmp));
1245 up_read(&uts_sem);
1246 if (copy_to_user(name, &tmp, sizeof(tmp)))
1247 return -EFAULT;
1248
1249 if (override_release(name->release, sizeof(name->release)))
1250 return -EFAULT;
1251 if (override_architecture(name))
1252 return -EFAULT;
1253 return 0;
1254}
1255
1256#ifdef __ARCH_WANT_SYS_OLD_UNAME
1257
1258
1259
1260SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
1261{
1262 struct old_utsname tmp;
1263
1264 if (!name)
1265 return -EFAULT;
1266
1267 down_read(&uts_sem);
1268 memcpy(&tmp, utsname(), sizeof(tmp));
1269 up_read(&uts_sem);
1270 if (copy_to_user(name, &tmp, sizeof(tmp)))
1271 return -EFAULT;
1272
1273 if (override_release(name->release, sizeof(name->release)))
1274 return -EFAULT;
1275 if (override_architecture(name))
1276 return -EFAULT;
1277 return 0;
1278}
1279
1280SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
1281{
1282 struct oldold_utsname tmp = {};
1283
1284 if (!name)
1285 return -EFAULT;
1286
1287 down_read(&uts_sem);
1288 memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN);
1289 memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN);
1290 memcpy(&tmp.release, &utsname()->release, __OLD_UTS_LEN);
1291 memcpy(&tmp.version, &utsname()->version, __OLD_UTS_LEN);
1292 memcpy(&tmp.machine, &utsname()->machine, __OLD_UTS_LEN);
1293 up_read(&uts_sem);
1294 if (copy_to_user(name, &tmp, sizeof(tmp)))
1295 return -EFAULT;
1296
1297 if (override_architecture(name))
1298 return -EFAULT;
1299 if (override_release(name->release, sizeof(name->release)))
1300 return -EFAULT;
1301 return 0;
1302}
1303#endif
1304
1305SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
1306{
1307 int errno;
1308 char tmp[__NEW_UTS_LEN];
1309
1310 if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
1311 return -EPERM;
1312
1313 if (len < 0 || len > __NEW_UTS_LEN)
1314 return -EINVAL;
1315 errno = -EFAULT;
1316 if (!copy_from_user(tmp, name, len)) {
1317 struct new_utsname *u;
1318
1319 down_write(&uts_sem);
1320 u = utsname();
1321 memcpy(u->nodename, tmp, len);
1322 memset(u->nodename + len, 0, sizeof(u->nodename) - len);
1323 errno = 0;
1324 uts_proc_notify(UTS_PROC_HOSTNAME);
1325 up_write(&uts_sem);
1326 }
1327 return errno;
1328}
1329
1330#ifdef __ARCH_WANT_SYS_GETHOSTNAME
1331
1332SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
1333{
1334 int i;
1335 struct new_utsname *u;
1336 char tmp[__NEW_UTS_LEN + 1];
1337
1338 if (len < 0)
1339 return -EINVAL;
1340 down_read(&uts_sem);
1341 u = utsname();
1342 i = 1 + strlen(u->nodename);
1343 if (i > len)
1344 i = len;
1345 memcpy(tmp, u->nodename, i);
1346 up_read(&uts_sem);
1347 if (copy_to_user(name, tmp, i))
1348 return -EFAULT;
1349 return 0;
1350}
1351
1352#endif
1353
1354
1355
1356
1357
1358SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
1359{
1360 int errno;
1361 char tmp[__NEW_UTS_LEN];
1362
1363 if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
1364 return -EPERM;
1365 if (len < 0 || len > __NEW_UTS_LEN)
1366 return -EINVAL;
1367
1368 errno = -EFAULT;
1369 if (!copy_from_user(tmp, name, len)) {
1370 struct new_utsname *u;
1371
1372 down_write(&uts_sem);
1373 u = utsname();
1374 memcpy(u->domainname, tmp, len);
1375 memset(u->domainname + len, 0, sizeof(u->domainname) - len);
1376 errno = 0;
1377 uts_proc_notify(UTS_PROC_DOMAINNAME);
1378 up_write(&uts_sem);
1379 }
1380 return errno;
1381}
1382
1383SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1384{
1385 struct rlimit value;
1386 int ret;
1387
1388 ret = do_prlimit(current, resource, NULL, &value);
1389 if (!ret)
1390 ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
1391
1392 return ret;
1393}
1394
1395#ifdef CONFIG_COMPAT
1396
1397COMPAT_SYSCALL_DEFINE2(setrlimit, unsigned int, resource,
1398 struct compat_rlimit __user *, rlim)
1399{
1400 struct rlimit r;
1401 struct compat_rlimit r32;
1402
1403 if (copy_from_user(&r32, rlim, sizeof(struct compat_rlimit)))
1404 return -EFAULT;
1405
1406 if (r32.rlim_cur == COMPAT_RLIM_INFINITY)
1407 r.rlim_cur = RLIM_INFINITY;
1408 else
1409 r.rlim_cur = r32.rlim_cur;
1410 if (r32.rlim_max == COMPAT_RLIM_INFINITY)
1411 r.rlim_max = RLIM_INFINITY;
1412 else
1413 r.rlim_max = r32.rlim_max;
1414 return do_prlimit(current, resource, &r, NULL);
1415}
1416
1417COMPAT_SYSCALL_DEFINE2(getrlimit, unsigned int, resource,
1418 struct compat_rlimit __user *, rlim)
1419{
1420 struct rlimit r;
1421 int ret;
1422
1423 ret = do_prlimit(current, resource, NULL, &r);
1424 if (!ret) {
1425 struct compat_rlimit r32;
1426 if (r.rlim_cur > COMPAT_RLIM_INFINITY)
1427 r32.rlim_cur = COMPAT_RLIM_INFINITY;
1428 else
1429 r32.rlim_cur = r.rlim_cur;
1430 if (r.rlim_max > COMPAT_RLIM_INFINITY)
1431 r32.rlim_max = COMPAT_RLIM_INFINITY;
1432 else
1433 r32.rlim_max = r.rlim_max;
1434
1435 if (copy_to_user(rlim, &r32, sizeof(struct compat_rlimit)))
1436 return -EFAULT;
1437 }
1438 return ret;
1439}
1440
1441#endif
1442
1443#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT
1444
1445
1446
1447
1448SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
1449 struct rlimit __user *, rlim)
1450{
1451 struct rlimit x;
1452 if (resource >= RLIM_NLIMITS)
1453 return -EINVAL;
1454
1455 resource = array_index_nospec(resource, RLIM_NLIMITS);
1456 task_lock(current->group_leader);
1457 x = current->signal->rlim[resource];
1458 task_unlock(current->group_leader);
1459 if (x.rlim_cur > 0x7FFFFFFF)
1460 x.rlim_cur = 0x7FFFFFFF;
1461 if (x.rlim_max > 0x7FFFFFFF)
1462 x.rlim_max = 0x7FFFFFFF;
1463 return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
1464}
1465
1466#ifdef CONFIG_COMPAT
1467COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
1468 struct compat_rlimit __user *, rlim)
1469{
1470 struct rlimit r;
1471
1472 if (resource >= RLIM_NLIMITS)
1473 return -EINVAL;
1474
1475 resource = array_index_nospec(resource, RLIM_NLIMITS);
1476 task_lock(current->group_leader);
1477 r = current->signal->rlim[resource];
1478 task_unlock(current->group_leader);
1479 if (r.rlim_cur > 0x7FFFFFFF)
1480 r.rlim_cur = 0x7FFFFFFF;
1481 if (r.rlim_max > 0x7FFFFFFF)
1482 r.rlim_max = 0x7FFFFFFF;
1483
1484 if (put_user(r.rlim_cur, &rlim->rlim_cur) ||
1485 put_user(r.rlim_max, &rlim->rlim_max))
1486 return -EFAULT;
1487 return 0;
1488}
1489#endif
1490
1491#endif
1492
1493static inline bool rlim64_is_infinity(__u64 rlim64)
1494{
1495#if BITS_PER_LONG < 64
1496 return rlim64 >= ULONG_MAX;
1497#else
1498 return rlim64 == RLIM64_INFINITY;
1499#endif
1500}
1501
1502static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
1503{
1504 if (rlim->rlim_cur == RLIM_INFINITY)
1505 rlim64->rlim_cur = RLIM64_INFINITY;
1506 else
1507 rlim64->rlim_cur = rlim->rlim_cur;
1508 if (rlim->rlim_max == RLIM_INFINITY)
1509 rlim64->rlim_max = RLIM64_INFINITY;
1510 else
1511 rlim64->rlim_max = rlim->rlim_max;
1512}
1513
1514static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
1515{
1516 if (rlim64_is_infinity(rlim64->rlim_cur))
1517 rlim->rlim_cur = RLIM_INFINITY;
1518 else
1519 rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
1520 if (rlim64_is_infinity(rlim64->rlim_max))
1521 rlim->rlim_max = RLIM_INFINITY;
1522 else
1523 rlim->rlim_max = (unsigned long)rlim64->rlim_max;
1524}
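
/*
 * do_prlimit() reads and/or updates one resource limit of @tsk.  Limits
 * are per thread group, so they are accessed under tasklist_lock plus the
 * group leader's task_lock.  Raising a hard limit requires
 * CAP_SYS_RESOURCE, RLIMIT_NOFILE is additionally capped by
 * sysctl_nr_open, and a finite new RLIMIT_CPU is pushed into the POSIX
 * CPU timer code so that it takes effect on the running task.
 */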
1527int do_prlimit(struct task_struct *tsk, unsigned int resource,
1528 struct rlimit *new_rlim, struct rlimit *old_rlim)
1529{
1530 struct rlimit *rlim;
1531 int retval = 0;
1532
1533 if (resource >= RLIM_NLIMITS)
1534 return -EINVAL;
1535 if (new_rlim) {
1536 if (new_rlim->rlim_cur > new_rlim->rlim_max)
1537 return -EINVAL;
1538 if (resource == RLIMIT_NOFILE &&
1539 new_rlim->rlim_max > sysctl_nr_open)
1540 return -EPERM;
1541 }
1542
1543
1544 read_lock(&tasklist_lock);
1545 if (!tsk->sighand) {
1546 retval = -ESRCH;
1547 goto out;
1548 }
1549
1550 rlim = tsk->signal->rlim + resource;
1551 task_lock(tsk->group_leader);
1552 if (new_rlim) {
1553
1554
1555 if (new_rlim->rlim_max > rlim->rlim_max &&
1556 !capable(CAP_SYS_RESOURCE))
1557 retval = -EPERM;
1558 if (!retval)
1559 retval = security_task_setrlimit(tsk, resource, new_rlim);
1560 if (resource == RLIMIT_CPU && new_rlim->rlim_cur == 0) {
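			/*
			 * A soft limit of 0 would mean "expire immediately",
			 * but the CPU timer code treats a zero expiry as
			 * "none", so store the smallest value that still
			 * arms the timer.
			 */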
1567 new_rlim->rlim_cur = 1;
1568 }
1569 }
1570 if (!retval) {
1571 if (old_rlim)
1572 *old_rlim = *rlim;
1573 if (new_rlim)
1574 *rlim = *new_rlim;
1575 }
1576 task_unlock(tsk->group_leader);
1577
1578
1579
1580
1581
1582
1583
1584 if (!retval && new_rlim && resource == RLIMIT_CPU &&
1585 new_rlim->rlim_cur != RLIM_INFINITY &&
1586 IS_ENABLED(CONFIG_POSIX_TIMERS))
1587 update_rlimit_cpu(tsk, new_rlim->rlim_cur);
1588out:
1589 read_unlock(&tasklist_lock);
1590 return retval;
1591}
1592
1593
1594static int check_prlimit_permission(struct task_struct *task,
1595 unsigned int flags)
1596{
1597 const struct cred *cred = current_cred(), *tcred;
1598 bool id_match;
1599
1600 if (current == task)
1601 return 0;
1602
1603 tcred = __task_cred(task);
1604 id_match = (uid_eq(cred->uid, tcred->euid) &&
1605 uid_eq(cred->uid, tcred->suid) &&
1606 uid_eq(cred->uid, tcred->uid) &&
1607 gid_eq(cred->gid, tcred->egid) &&
1608 gid_eq(cred->gid, tcred->sgid) &&
1609 gid_eq(cred->gid, tcred->gid));
1610 if (!id_match && !ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
1611 return -EPERM;
1612
1613 return security_task_prlimit(cred, tcred, flags);
1614}
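
/*
 * prlimit64() is the combined get/set interface behind the userspace
 * prlimit()/prlimit64() wrappers: @new_rlim and @old_rlim may each be
 * NULL.  Operating on another task requires either that the caller's
 * real uid/gid match all of the target's ids or CAP_SYS_RESOURCE in the
 * target's user namespace (see check_prlimit_permission() above), plus
 * approval from the LSM.
 */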
1616SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
1617 const struct rlimit64 __user *, new_rlim,
1618 struct rlimit64 __user *, old_rlim)
1619{
1620 struct rlimit64 old64, new64;
1621 struct rlimit old, new;
1622 struct task_struct *tsk;
1623 unsigned int checkflags = 0;
1624 int ret;
1625
1626 if (old_rlim)
1627 checkflags |= LSM_PRLIMIT_READ;
1628
1629 if (new_rlim) {
1630 if (copy_from_user(&new64, new_rlim, sizeof(new64)))
1631 return -EFAULT;
1632 rlim64_to_rlim(&new64, &new);
1633 checkflags |= LSM_PRLIMIT_WRITE;
1634 }
1635
1636 rcu_read_lock();
1637 tsk = pid ? find_task_by_vpid(pid) : current;
1638 if (!tsk) {
1639 rcu_read_unlock();
1640 return -ESRCH;
1641 }
1642 ret = check_prlimit_permission(tsk, checkflags);
1643 if (ret) {
1644 rcu_read_unlock();
1645 return ret;
1646 }
1647 get_task_struct(tsk);
1648 rcu_read_unlock();
1649
1650 ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
1651 old_rlim ? &old : NULL);
1652
1653 if (!ret && old_rlim) {
1654 rlim_to_rlim64(&old, &old64);
1655 if (copy_to_user(old_rlim, &old64, sizeof(old64)))
1656 ret = -EFAULT;
1657 }
1658
1659 put_task_struct(tsk);
1660 return ret;
1661}
1662
1663SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
1664{
1665 struct rlimit new_rlim;
1666
1667 if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
1668 return -EFAULT;
1669 return do_prlimit(current, resource, &new_rlim, NULL);
1670}
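
/*
 * accumulate_thread_rusage() below adds one thread's counters into @r;
 * getrusage() uses it to fill in the caller's view:
 *
 *  RUSAGE_THREAD   - the calling thread only
 *  RUSAGE_CHILDREN - children that have already been waited for
 *  RUSAGE_SELF     - the whole thread group
 *  RUSAGE_BOTH     - children plus the thread group (the switch falls
 *                    through from CHILDREN into SELF for this case)
 *
 * Everything except the RUSAGE_THREAD case is sampled under the target's
 * sighand lock; ru_maxrss is reported in kilobytes.
 */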
1705static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
1706{
1707 r->ru_nvcsw += t->nvcsw;
1708 r->ru_nivcsw += t->nivcsw;
1709 r->ru_minflt += t->min_flt;
1710 r->ru_majflt += t->maj_flt;
1711 r->ru_inblock += task_io_get_inblock(t);
1712 r->ru_oublock += task_io_get_oublock(t);
1713}
1714
1715void getrusage(struct task_struct *p, int who, struct rusage *r)
1716{
1717 struct task_struct *t;
1718 unsigned long flags;
1719 u64 tgutime, tgstime, utime, stime;
1720 unsigned long maxrss = 0;
1721
1722 memset((char *)r, 0, sizeof (*r));
1723 utime = stime = 0;
1724
1725 if (who == RUSAGE_THREAD) {
1726 task_cputime_adjusted(current, &utime, &stime);
1727 accumulate_thread_rusage(p, r);
1728 maxrss = p->signal->maxrss;
1729 goto out;
1730 }
1731
1732 if (!lock_task_sighand(p, &flags))
1733 return;
1734
1735 switch (who) {
1736 case RUSAGE_BOTH:
1737 case RUSAGE_CHILDREN:
1738 utime = p->signal->cutime;
1739 stime = p->signal->cstime;
1740 r->ru_nvcsw = p->signal->cnvcsw;
1741 r->ru_nivcsw = p->signal->cnivcsw;
1742 r->ru_minflt = p->signal->cmin_flt;
1743 r->ru_majflt = p->signal->cmaj_flt;
1744 r->ru_inblock = p->signal->cinblock;
1745 r->ru_oublock = p->signal->coublock;
1746 maxrss = p->signal->cmaxrss;
1747
1748 if (who == RUSAGE_CHILDREN)
1749 break;
1750
1751 case RUSAGE_SELF:
1752 thread_group_cputime_adjusted(p, &tgutime, &tgstime);
1753 utime += tgutime;
1754 stime += tgstime;
1755 r->ru_nvcsw += p->signal->nvcsw;
1756 r->ru_nivcsw += p->signal->nivcsw;
1757 r->ru_minflt += p->signal->min_flt;
1758 r->ru_majflt += p->signal->maj_flt;
1759 r->ru_inblock += p->signal->inblock;
1760 r->ru_oublock += p->signal->oublock;
1761 if (maxrss < p->signal->maxrss)
1762 maxrss = p->signal->maxrss;
1763 t = p;
1764 do {
1765 accumulate_thread_rusage(t, r);
1766 } while_each_thread(p, t);
1767 break;
1768
1769 default:
1770 BUG();
1771 }
1772 unlock_task_sighand(p, &flags);
1773
1774out:
1775 r->ru_utime = ns_to_timeval(utime);
1776 r->ru_stime = ns_to_timeval(stime);
1777
1778 if (who != RUSAGE_CHILDREN) {
1779 struct mm_struct *mm = get_task_mm(p);
1780
1781 if (mm) {
1782 setmax_mm_hiwater_rss(&maxrss, mm);
1783 mmput(mm);
1784 }
1785 }
1786 r->ru_maxrss = maxrss * (PAGE_SIZE / 1024);
1787}
1788
1789SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
1790{
1791 struct rusage r;
1792
1793 if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
1794 who != RUSAGE_THREAD)
1795 return -EINVAL;
1796
1797 getrusage(current, who, &r);
1798 return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
1799}
1800
1801#ifdef CONFIG_COMPAT
1802COMPAT_SYSCALL_DEFINE2(getrusage, int, who, struct compat_rusage __user *, ru)
1803{
1804 struct rusage r;
1805
1806 if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
1807 who != RUSAGE_THREAD)
1808 return -EINVAL;
1809
1810 getrusage(current, who, &r);
1811 return put_compat_rusage(&r, ru);
1812}
1813#endif
1814
SYSCALL_DEFINE1(umask, int, mask)
{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
	return mask;
}
1820
1821static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
1822{
1823 struct fd exe;
1824 struct file *old_exe, *exe_file;
1825 struct inode *inode;
1826 int err;
1827
1828 exe = fdget(fd);
1829 if (!exe.file)
1830 return -EBADF;
1831
1832 inode = file_inode(exe.file);
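
	/*
	 * The new exe link must be a regular file that the caller may
	 * execute and that is not on a noexec mount, so that
	 * /proc/<pid>/exe stays meaningful.
	 */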
1839 err = -EACCES;
1840 if (!S_ISREG(inode->i_mode) || path_noexec(&exe.file->f_path))
1841 goto exit;
1842
1843 err = inode_permission(inode, MAY_EXEC);
1844 if (err)
1845 goto exit;
1846
1847
1848
1849
1850 exe_file = get_mm_exe_file(mm);
1851 err = -EBUSY;
1852 if (exe_file) {
1853 struct vm_area_struct *vma;
1854
1855 down_read(&mm->mmap_sem);
1856 for (vma = mm->mmap; vma; vma = vma->vm_next) {
1857 if (!vma->vm_file)
1858 continue;
1859 if (path_equal(&vma->vm_file->f_path,
1860 &exe_file->f_path))
1861 goto exit_err;
1862 }
1863
1864 up_read(&mm->mmap_sem);
1865 fput(exe_file);
1866 }
1867
1868 err = 0;
1869
1870 get_file(exe.file);
1871 old_exe = xchg(&mm->exe_file, exe.file);
1872 if (old_exe)
1873 fput(old_exe);
1874exit:
1875 fdput(exe);
1876 return err;
1877exit_err:
1878 up_read(&mm->mmap_sem);
1879 fput(exe_file);
1880 goto exit;
1881}
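
/*
 * Sanity-check a complete prctl_mm_map before it is applied: every
 * address must lie inside [mmap_min_addr, TASK_SIZE), the code, data,
 * brk, arg and env ranges must be ordered, the brk area may not overlap
 * the data segment and must respect RLIMIT_DATA, a supplied auxv must
 * fit in mm->saved_auxv, and replacing the exe file requires
 * CAP_SYS_ADMIN.
 */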
1887static int validate_prctl_map(struct prctl_mm_map *prctl_map)
1888{
1889 unsigned long mmap_max_addr = TASK_SIZE;
1890 struct mm_struct *mm = current->mm;
1891 int error = -EINVAL, i;
1892
1893 static const unsigned char offsets[] = {
1894 offsetof(struct prctl_mm_map, start_code),
1895 offsetof(struct prctl_mm_map, end_code),
1896 offsetof(struct prctl_mm_map, start_data),
1897 offsetof(struct prctl_mm_map, end_data),
1898 offsetof(struct prctl_mm_map, start_brk),
1899 offsetof(struct prctl_mm_map, brk),
1900 offsetof(struct prctl_mm_map, start_stack),
1901 offsetof(struct prctl_mm_map, arg_start),
1902 offsetof(struct prctl_mm_map, arg_end),
1903 offsetof(struct prctl_mm_map, env_start),
1904 offsetof(struct prctl_mm_map, env_end),
1905 };
1906
1907
1908
1909
1910
1911 for (i = 0; i < ARRAY_SIZE(offsets); i++) {
1912 u64 val = *(u64 *)((char *)prctl_map + offsets[i]);
1913
1914 if ((unsigned long)val >= mmap_max_addr ||
1915 (unsigned long)val < mmap_min_addr)
1916 goto out;
1917 }
1918
1919
1920
1921
1922#define __prctl_check_order(__m1, __op, __m2) \
1923 ((unsigned long)prctl_map->__m1 __op \
1924 (unsigned long)prctl_map->__m2) ? 0 : -EINVAL
1925 error = __prctl_check_order(start_code, <, end_code);
1926 error |= __prctl_check_order(start_data, <, end_data);
1927 error |= __prctl_check_order(start_brk, <=, brk);
1928 error |= __prctl_check_order(arg_start, <=, arg_end);
1929 error |= __prctl_check_order(env_start, <=, env_end);
1930 if (error)
1931 goto out;
1932#undef __prctl_check_order
1933
1934 error = -EINVAL;
1935
1936
1937
1938
1939 if (prctl_map->start_brk <= prctl_map->end_data ||
1940 prctl_map->brk <= prctl_map->end_data)
1941 goto out;
1942
1943
1944
1945
1946 if (check_data_rlimit(rlimit(RLIMIT_DATA), prctl_map->brk,
1947 prctl_map->start_brk, prctl_map->end_data,
1948 prctl_map->start_data))
1949 goto out;
1950
1951
1952
1953
1954 if (prctl_map->auxv_size) {
1955 if (!prctl_map->auxv || prctl_map->auxv_size > sizeof(mm->saved_auxv))
1956 goto out;
1957 }
1958
1959
1960
1961
1962
1963
1964 if (prctl_map->exe_fd != (u32)-1) {
1965 if (!ns_capable(current_user_ns(), CAP_SYS_ADMIN))
1966 goto out;
1967 }
1968
1969 error = 0;
1970out:
1971 return error;
1972}
1973
1974#ifdef CONFIG_CHECKPOINT_RESTORE
1975static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data_size)
1976{
1977 struct prctl_mm_map prctl_map = { .exe_fd = (u32)-1, };
1978 unsigned long user_auxv[AT_VECTOR_SIZE];
1979 struct mm_struct *mm = current->mm;
1980 int error;
1981
1982 BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
1983 BUILD_BUG_ON(sizeof(struct prctl_mm_map) > 256);
1984
1985 if (opt == PR_SET_MM_MAP_SIZE)
1986 return put_user((unsigned int)sizeof(prctl_map),
1987 (unsigned int __user *)addr);
1988
1989 if (data_size != sizeof(prctl_map))
1990 return -EINVAL;
1991
1992 if (copy_from_user(&prctl_map, addr, sizeof(prctl_map)))
1993 return -EFAULT;
1994
1995 error = validate_prctl_map(&prctl_map);
1996 if (error)
1997 return error;
1998
1999 if (prctl_map.auxv_size) {
2000 memset(user_auxv, 0, sizeof(user_auxv));
2001 if (copy_from_user(user_auxv,
2002 (const void __user *)prctl_map.auxv,
2003 prctl_map.auxv_size))
2004 return -EFAULT;
2005
2006
2007 user_auxv[AT_VECTOR_SIZE - 2] = AT_NULL;
2008 user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL;
2009 }
2010
2011 if (prctl_map.exe_fd != (u32)-1) {
2012 error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
2013 if (error)
2014 return error;
2015 }
2016
2017
2018
2019
2020
2021 down_read(&mm->mmap_sem);
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033
2034
2035 spin_lock(&mm->arg_lock);
2036 mm->start_code = prctl_map.start_code;
2037 mm->end_code = prctl_map.end_code;
2038 mm->start_data = prctl_map.start_data;
2039 mm->end_data = prctl_map.end_data;
2040 mm->start_brk = prctl_map.start_brk;
2041 mm->brk = prctl_map.brk;
2042 mm->start_stack = prctl_map.start_stack;
2043 mm->arg_start = prctl_map.arg_start;
2044 mm->arg_end = prctl_map.arg_end;
2045 mm->env_start = prctl_map.env_start;
2046 mm->env_end = prctl_map.env_end;
2047 spin_unlock(&mm->arg_lock);
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057 if (prctl_map.auxv_size)
2058 memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));
2059
2060 up_read(&mm->mmap_sem);
2061 return 0;
2062}
2063#endif
2064
2065static int prctl_set_auxv(struct mm_struct *mm, unsigned long addr,
2066 unsigned long len)
2067{
2068
2069
2070
2071
2072
2073
2074 unsigned long user_auxv[AT_VECTOR_SIZE];
2075
2076 if (len > sizeof(user_auxv))
2077 return -EINVAL;
2078
2079 if (copy_from_user(user_auxv, (const void __user *)addr, len))
2080 return -EFAULT;
2081
2082
2083 user_auxv[AT_VECTOR_SIZE - 2] = 0;
2084 user_auxv[AT_VECTOR_SIZE - 1] = 0;
2085
2086 BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
2087
2088 task_lock(current);
2089 memcpy(mm->saved_auxv, user_auxv, len);
2090 task_unlock(current);
2091
2092 return 0;
2093}
2094
2095static int prctl_set_mm(int opt, unsigned long addr,
2096 unsigned long arg4, unsigned long arg5)
2097{
2098 struct mm_struct *mm = current->mm;
2099 struct prctl_mm_map prctl_map;
2100 struct vm_area_struct *vma;
2101 int error;
2102
2103 if (arg5 || (arg4 && (opt != PR_SET_MM_AUXV &&
2104 opt != PR_SET_MM_MAP &&
2105 opt != PR_SET_MM_MAP_SIZE)))
2106 return -EINVAL;
2107
2108#ifdef CONFIG_CHECKPOINT_RESTORE
2109 if (opt == PR_SET_MM_MAP || opt == PR_SET_MM_MAP_SIZE)
2110 return prctl_set_mm_map(opt, (const void __user *)addr, arg4);
2111#endif
2112
2113 if (!capable(CAP_SYS_RESOURCE))
2114 return -EPERM;
2115
2116 if (opt == PR_SET_MM_EXE_FILE)
2117 return prctl_set_mm_exe_file(mm, (unsigned int)addr);
2118
2119 if (opt == PR_SET_MM_AUXV)
2120 return prctl_set_auxv(mm, addr, arg4);
2121
2122 if (addr >= TASK_SIZE || addr < mmap_min_addr)
2123 return -EINVAL;
2124
2125 error = -EINVAL;
2126
2127 down_write(&mm->mmap_sem);
2128 vma = find_vma(mm, addr);
2129
2130 prctl_map.start_code = mm->start_code;
2131 prctl_map.end_code = mm->end_code;
2132 prctl_map.start_data = mm->start_data;
2133 prctl_map.end_data = mm->end_data;
2134 prctl_map.start_brk = mm->start_brk;
2135 prctl_map.brk = mm->brk;
2136 prctl_map.start_stack = mm->start_stack;
2137 prctl_map.arg_start = mm->arg_start;
2138 prctl_map.arg_end = mm->arg_end;
2139 prctl_map.env_start = mm->env_start;
2140 prctl_map.env_end = mm->env_end;
2141 prctl_map.auxv = NULL;
2142 prctl_map.auxv_size = 0;
2143 prctl_map.exe_fd = -1;
2144
2145 switch (opt) {
2146 case PR_SET_MM_START_CODE:
2147 prctl_map.start_code = addr;
2148 break;
2149 case PR_SET_MM_END_CODE:
2150 prctl_map.end_code = addr;
2151 break;
2152 case PR_SET_MM_START_DATA:
2153 prctl_map.start_data = addr;
2154 break;
2155 case PR_SET_MM_END_DATA:
2156 prctl_map.end_data = addr;
2157 break;
2158 case PR_SET_MM_START_STACK:
2159 prctl_map.start_stack = addr;
2160 break;
2161 case PR_SET_MM_START_BRK:
2162 prctl_map.start_brk = addr;
2163 break;
2164 case PR_SET_MM_BRK:
2165 prctl_map.brk = addr;
2166 break;
2167 case PR_SET_MM_ARG_START:
2168 prctl_map.arg_start = addr;
2169 break;
2170 case PR_SET_MM_ARG_END:
2171 prctl_map.arg_end = addr;
2172 break;
2173 case PR_SET_MM_ENV_START:
2174 prctl_map.env_start = addr;
2175 break;
2176 case PR_SET_MM_ENV_END:
2177 prctl_map.env_end = addr;
2178 break;
2179 default:
2180 goto out;
2181 }
2182
2183 error = validate_prctl_map(&prctl_map);
2184 if (error)
2185 goto out;
2186
2187 switch (opt) {
2188
2189
2190
2191
2192
2193
2194
2195 case PR_SET_MM_START_STACK:
2196 case PR_SET_MM_ARG_START:
2197 case PR_SET_MM_ARG_END:
2198 case PR_SET_MM_ENV_START:
2199 case PR_SET_MM_ENV_END:
2200 if (!vma) {
2201 error = -EFAULT;
2202 goto out;
2203 }
2204 }
2205
2206 mm->start_code = prctl_map.start_code;
2207 mm->end_code = prctl_map.end_code;
2208 mm->start_data = prctl_map.start_data;
2209 mm->end_data = prctl_map.end_data;
2210 mm->start_brk = prctl_map.start_brk;
2211 mm->brk = prctl_map.brk;
2212 mm->start_stack = prctl_map.start_stack;
2213 mm->arg_start = prctl_map.arg_start;
2214 mm->arg_end = prctl_map.arg_end;
2215 mm->env_start = prctl_map.env_start;
2216 mm->env_end = prctl_map.env_end;
2217
2218 error = 0;
2219out:
2220 up_write(&mm->mmap_sem);
2221 return error;
2222}
2223
2224#ifdef CONFIG_CHECKPOINT_RESTORE
2225static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
2226{
2227 return put_user(me->clear_child_tid, tid_addr);
2228}
2229#else
2230static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
2231{
2232 return -EINVAL;
2233}
2234#endif
2235
2236static int propagate_has_child_subreaper(struct task_struct *p, void *data)
2237{
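	/*
	 * A subtree whose root already has has_child_subreaper set, or
	 * which is rooted at a pid namespace's init task, needs no further
	 * marking; everything else gets the flag so the new subreaper
	 * setting is visible to its descendants.
	 */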
2246 if (p->signal->has_child_subreaper ||
2247 is_child_reaper(task_pid(p)))
2248 return 0;
2249
2250 p->signal->has_child_subreaper = 1;
2251 return 1;
2252}
2253
2254int __weak arch_prctl_spec_ctrl_get(struct task_struct *t, unsigned long which)
2255{
2256 return -EINVAL;
2257}
2258
2259int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which,
2260 unsigned long ctrl)
2261{
2262 return -EINVAL;
2263}
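
/*
 * prctl() is a grab bag of per-task controls.  The security hook gets
 * first chance to handle (or reject) an option; every other PR_* case
 * below validates its own arguments.  A typical call from user space is
 *
 *	prctl(PR_SET_NAME, (unsigned long)"worker", 0, 0, 0);
 *
 * which lands in the PR_SET_NAME case and renames the calling thread.
 */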
2265SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
2266 unsigned long, arg4, unsigned long, arg5)
2267{
2268 struct task_struct *me = current;
2269 unsigned char comm[sizeof(me->comm)];
2270 long error;
2271
2272 error = security_task_prctl(option, arg2, arg3, arg4, arg5);
2273 if (error != -ENOSYS)
2274 return error;
2275
2276 error = 0;
2277 switch (option) {
2278 case PR_SET_PDEATHSIG:
2279 if (!valid_signal(arg2)) {
2280 error = -EINVAL;
2281 break;
2282 }
2283 me->pdeath_signal = arg2;
2284 break;
2285 case PR_GET_PDEATHSIG:
2286 error = put_user(me->pdeath_signal, (int __user *)arg2);
2287 break;
2288 case PR_GET_DUMPABLE:
2289 error = get_dumpable(me->mm);
2290 break;
2291 case PR_SET_DUMPABLE:
2292 if (arg2 != SUID_DUMP_DISABLE && arg2 != SUID_DUMP_USER) {
2293 error = -EINVAL;
2294 break;
2295 }
2296 set_dumpable(me->mm, arg2);
2297 break;
2298
2299 case PR_SET_UNALIGN:
2300 error = SET_UNALIGN_CTL(me, arg2);
2301 break;
2302 case PR_GET_UNALIGN:
2303 error = GET_UNALIGN_CTL(me, arg2);
2304 break;
2305 case PR_SET_FPEMU:
2306 error = SET_FPEMU_CTL(me, arg2);
2307 break;
2308 case PR_GET_FPEMU:
2309 error = GET_FPEMU_CTL(me, arg2);
2310 break;
2311 case PR_SET_FPEXC:
2312 error = SET_FPEXC_CTL(me, arg2);
2313 break;
2314 case PR_GET_FPEXC:
2315 error = GET_FPEXC_CTL(me, arg2);
2316 break;
2317 case PR_GET_TIMING:
2318 error = PR_TIMING_STATISTICAL;
2319 break;
2320 case PR_SET_TIMING:
2321 if (arg2 != PR_TIMING_STATISTICAL)
2322 error = -EINVAL;
2323 break;
2324 case PR_SET_NAME:
2325 comm[sizeof(me->comm) - 1] = 0;
2326 if (strncpy_from_user(comm, (char __user *)arg2,
2327 sizeof(me->comm) - 1) < 0)
2328 return -EFAULT;
2329 set_task_comm(me, comm);
2330 proc_comm_connector(me);
2331 break;
2332 case PR_GET_NAME:
2333 get_task_comm(comm, me);
2334 if (copy_to_user((char __user *)arg2, comm, sizeof(comm)))
2335 return -EFAULT;
2336 break;
2337 case PR_GET_ENDIAN:
2338 error = GET_ENDIAN(me, arg2);
2339 break;
2340 case PR_SET_ENDIAN:
2341 error = SET_ENDIAN(me, arg2);
2342 break;
2343 case PR_GET_SECCOMP:
2344 error = prctl_get_seccomp();
2345 break;
2346 case PR_SET_SECCOMP:
2347 error = prctl_set_seccomp(arg2, (char __user *)arg3);
2348 break;
2349 case PR_GET_TSC:
2350 error = GET_TSC_CTL(arg2);
2351 break;
2352 case PR_SET_TSC:
2353 error = SET_TSC_CTL(arg2);
2354 break;
2355 case PR_TASK_PERF_EVENTS_DISABLE:
2356 error = perf_event_task_disable();
2357 break;
2358 case PR_TASK_PERF_EVENTS_ENABLE:
2359 error = perf_event_task_enable();
2360 break;
2361 case PR_GET_TIMERSLACK:
2362 if (current->timer_slack_ns > ULONG_MAX)
2363 error = ULONG_MAX;
2364 else
2365 error = current->timer_slack_ns;
2366 break;
2367 case PR_SET_TIMERSLACK:
2368 if (arg2 <= 0)
2369 current->timer_slack_ns =
2370 current->default_timer_slack_ns;
2371 else
2372 current->timer_slack_ns = arg2;
2373 break;
2374 case PR_MCE_KILL:
2375 if (arg4 | arg5)
2376 return -EINVAL;
2377 switch (arg2) {
2378 case PR_MCE_KILL_CLEAR:
2379 if (arg3 != 0)
2380 return -EINVAL;
2381 current->flags &= ~PF_MCE_PROCESS;
2382 break;
2383 case PR_MCE_KILL_SET:
2384 current->flags |= PF_MCE_PROCESS;
2385 if (arg3 == PR_MCE_KILL_EARLY)
2386 current->flags |= PF_MCE_EARLY;
2387 else if (arg3 == PR_MCE_KILL_LATE)
2388 current->flags &= ~PF_MCE_EARLY;
2389 else if (arg3 == PR_MCE_KILL_DEFAULT)
2390 current->flags &=
2391 ~(PF_MCE_EARLY|PF_MCE_PROCESS);
2392 else
2393 return -EINVAL;
2394 break;
2395 default:
2396 return -EINVAL;
2397 }
2398 break;
2399 case PR_MCE_KILL_GET:
2400 if (arg2 | arg3 | arg4 | arg5)
2401 return -EINVAL;
2402 if (current->flags & PF_MCE_PROCESS)
2403 error = (current->flags & PF_MCE_EARLY) ?
2404 PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
2405 else
2406 error = PR_MCE_KILL_DEFAULT;
2407 break;
2408 case PR_SET_MM:
2409 error = prctl_set_mm(arg2, arg3, arg4, arg5);
2410 break;
2411 case PR_GET_TID_ADDRESS:
2412 error = prctl_get_tid_address(me, (int __user **)arg2);
2413 break;
2414 case PR_SET_CHILD_SUBREAPER:
2415 me->signal->is_child_subreaper = !!arg2;
2416 if (!arg2)
2417 break;
2418
2419 walk_process_tree(me, propagate_has_child_subreaper, NULL);
2420 break;
2421 case PR_GET_CHILD_SUBREAPER:
2422 error = put_user(me->signal->is_child_subreaper,
2423 (int __user *)arg2);
2424 break;
2425 case PR_SET_NO_NEW_PRIVS:
2426 if (arg2 != 1 || arg3 || arg4 || arg5)
2427 return -EINVAL;
2428
2429 task_set_no_new_privs(current);
2430 break;
2431 case PR_GET_NO_NEW_PRIVS:
2432 if (arg2 || arg3 || arg4 || arg5)
2433 return -EINVAL;
2434 return task_no_new_privs(current) ? 1 : 0;
2435 case PR_GET_THP_DISABLE:
2436 if (arg2 || arg3 || arg4 || arg5)
2437 return -EINVAL;
2438 error = !!test_bit(MMF_DISABLE_THP, &me->mm->flags);
2439 break;
2440 case PR_SET_THP_DISABLE:
2441 if (arg3 || arg4 || arg5)
2442 return -EINVAL;
2443 if (down_write_killable(&me->mm->mmap_sem))
2444 return -EINTR;
2445 if (arg2)
2446 set_bit(MMF_DISABLE_THP, &me->mm->flags);
2447 else
2448 clear_bit(MMF_DISABLE_THP, &me->mm->flags);
2449 up_write(&me->mm->mmap_sem);
2450 break;
2451 case PR_MPX_ENABLE_MANAGEMENT:
2452 if (arg2 || arg3 || arg4 || arg5)
2453 return -EINVAL;
2454 error = MPX_ENABLE_MANAGEMENT();
2455 break;
2456 case PR_MPX_DISABLE_MANAGEMENT:
2457 if (arg2 || arg3 || arg4 || arg5)
2458 return -EINVAL;
2459 error = MPX_DISABLE_MANAGEMENT();
2460 break;
2461 case PR_SET_FP_MODE:
2462 error = SET_FP_MODE(me, arg2);
2463 break;
2464 case PR_GET_FP_MODE:
2465 error = GET_FP_MODE(me);
2466 break;
2467 case PR_SVE_SET_VL:
2468 error = SVE_SET_VL(arg2);
2469 break;
2470 case PR_SVE_GET_VL:
2471 error = SVE_GET_VL();
2472 break;
2473 case PR_GET_SPECULATION_CTRL:
2474 if (arg3 || arg4 || arg5)
2475 return -EINVAL;
2476 error = arch_prctl_spec_ctrl_get(me, arg2);
2477 break;
2478 case PR_SET_SPECULATION_CTRL:
2479 if (arg4 || arg5)
2480 return -EINVAL;
2481 error = arch_prctl_spec_ctrl_set(me, arg2, arg3);
2482 break;
2483 case PR_PAC_RESET_KEYS:
2484 if (arg3 || arg4 || arg5)
2485 return -EINVAL;
2486 error = PAC_RESET_KEYS(me, arg2);
2487 break;
2488 default:
2489 error = -EINVAL;
2490 break;
2491 }
2492 return error;
2493}
2494
2495SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
2496 struct getcpu_cache __user *, unused)
2497{
2498 int err = 0;
2499 int cpu = raw_smp_processor_id();
2500
2501 if (cpup)
2502 err |= put_user(cpu, cpup);
2503 if (nodep)
2504 err |= put_user(cpu_to_node(cpu), nodep);
2505 return err ? -EFAULT : 0;
2506}
2507
2508
2509
2510
2511
2512static int do_sysinfo(struct sysinfo *info)
2513{
2514 unsigned long mem_total, sav_total;
2515 unsigned int mem_unit, bitcount;
2516 struct timespec64 tp;
2517
2518 memset(info, 0, sizeof(struct sysinfo));
2519
2520 ktime_get_boottime_ts64(&tp);
2521 info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
2522
2523 get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
2524
2525 info->procs = nr_threads;
2526
2527 si_meminfo(info);
2528 si_swapinfo(info);
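
	/*
	 * If mem_total = totalram + totalswap fits in an unsigned long
	 * (including after scaling by mem_unit), convert all the counters
	 * below to a mem_unit of one byte; otherwise keep the original
	 * mem_unit so the fields do not overflow.
	 */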
2539 mem_total = info->totalram + info->totalswap;
2540 if (mem_total < info->totalram || mem_total < info->totalswap)
2541 goto out;
2542 bitcount = 0;
2543 mem_unit = info->mem_unit;
2544 while (mem_unit > 1) {
2545 bitcount++;
2546 mem_unit >>= 1;
2547 sav_total = mem_total;
2548 mem_total <<= 1;
2549 if (mem_total < sav_total)
2550 goto out;
2551 }
2552
2553
2554
2555
2556
2557
2558
2559
2560 info->mem_unit = 1;
2561 info->totalram <<= bitcount;
2562 info->freeram <<= bitcount;
2563 info->sharedram <<= bitcount;
2564 info->bufferram <<= bitcount;
2565 info->totalswap <<= bitcount;
2566 info->freeswap <<= bitcount;
2567 info->totalhigh <<= bitcount;
2568 info->freehigh <<= bitcount;
2569
2570out:
2571 return 0;
2572}
2573
2574SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
2575{
2576 struct sysinfo val;
2577
2578 do_sysinfo(&val);
2579
2580 if (copy_to_user(info, &val, sizeof(struct sysinfo)))
2581 return -EFAULT;
2582
2583 return 0;
2584}
2585
2586#ifdef CONFIG_COMPAT
2587struct compat_sysinfo {
2588 s32 uptime;
2589 u32 loads[3];
2590 u32 totalram;
2591 u32 freeram;
2592 u32 sharedram;
2593 u32 bufferram;
2594 u32 totalswap;
2595 u32 freeswap;
2596 u16 procs;
2597 u16 pad;
2598 u32 totalhigh;
2599 u32 freehigh;
2600 u32 mem_unit;
2601 char _f[20-2*sizeof(u32)-sizeof(int)];
2602};
2603
2604COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info)
2605{
2606 struct sysinfo s;
2607
2608 do_sysinfo(&s);
2609
2610
2611
2612
2613 if (upper_32_bits(s.totalram) || upper_32_bits(s.totalswap)) {
2614 int bitcount = 0;
2615
2616 while (s.mem_unit < PAGE_SIZE) {
2617 s.mem_unit <<= 1;
2618 bitcount++;
2619 }
2620
2621 s.totalram >>= bitcount;
2622 s.freeram >>= bitcount;
2623 s.sharedram >>= bitcount;
2624 s.bufferram >>= bitcount;
2625 s.totalswap >>= bitcount;
2626 s.freeswap >>= bitcount;
2627 s.totalhigh >>= bitcount;
2628 s.freehigh >>= bitcount;
2629 }
2630
2631 if (!access_ok(info, sizeof(struct compat_sysinfo)) ||
2632 __put_user(s.uptime, &info->uptime) ||
2633 __put_user(s.loads[0], &info->loads[0]) ||
2634 __put_user(s.loads[1], &info->loads[1]) ||
2635 __put_user(s.loads[2], &info->loads[2]) ||
2636 __put_user(s.totalram, &info->totalram) ||
2637 __put_user(s.freeram, &info->freeram) ||
2638 __put_user(s.sharedram, &info->sharedram) ||
2639 __put_user(s.bufferram, &info->bufferram) ||
2640 __put_user(s.totalswap, &info->totalswap) ||
2641 __put_user(s.freeswap, &info->freeswap) ||
2642 __put_user(s.procs, &info->procs) ||
2643 __put_user(s.totalhigh, &info->totalhigh) ||
2644 __put_user(s.freehigh, &info->freehigh) ||
2645 __put_user(s.mem_unit, &info->mem_unit))
2646 return -EFAULT;
2647
2648 return 0;
2649}
2650#endif
2651