#include <linux/export.h>
#include <linux/mm.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/kmod.h>
#include <linux/perf_event.h>
#include <linux/resource.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/getcpu.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/seccomp.h>
#include <linux/cpu.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/fs_struct.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/gfp.h>
#include <linux/syscore_ops.h>
#include <linux/version.h>
#include <linux/ctype.h>

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>
#include <linux/user_namespace.h>
#include <linux/binfmts.h>

#include <linux/sched.h>
#include <linux/rcupdate.h>
#include <linux/uidgid.h>
#include <linux/cred.h>

#include <linux/kmsg_dump.h>

#include <generated/utsrelease.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/unistd.h>

#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_ENDIAN
# define GET_ENDIAN(a,b)	(-EINVAL)
#endif
#ifndef SET_ENDIAN
# define SET_ENDIAN(a,b)	(-EINVAL)
#endif
#ifndef GET_TSC_CTL
# define GET_TSC_CTL(a)		(-EINVAL)
#endif
#ifndef SET_TSC_CTL
# define SET_TSC_CTL(a)		(-EINVAL)
#endif

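/*
 * System-wide overflow UID and GID: reported to user space when a kernel
 * uid_t/gid_t value cannot be represented in a legacy 16-bit ABI.
 */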
int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);

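/*
 * The same, but for filesystems which can only store a 16-bit UID and GID.
 */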
int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);

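/*
 * set_one_prio_perm - may current change @p's nice value?
 *
 * True if current's effective UID matches @p's real or effective UID, or
 * if current has CAP_SYS_NICE in @p's user namespace.
 */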
static bool set_one_prio_perm(struct task_struct *p)
{
	const struct cred *cred = current_cred(), *pcred = __task_cred(p);

	if (uid_eq(pcred->uid, cred->euid) ||
	    uid_eq(pcred->euid, cred->euid))
		return true;
	if (ns_capable(pcred->user_ns, CAP_SYS_NICE))
		return true;
	return false;
}

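/*
 * set_one_prio - set the nice value of a single task
 *
 * Permission and security failures are folded into @error so that a caller
 * looping over several tasks reports the last failure it saw, while a
 * stale -ESRCH is cleared once at least one task has been handled.
 */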
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
	int no_nice;

	if (!set_one_prio_perm(p)) {
		error = -EPERM;
		goto out;
	}
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
		error = -EACCES;
		goto out;
	}
	no_nice = security_task_setnice(p, niceval);
	if (no_nice) {
		error = no_nice;
		goto out;
	}
	if (error == -ESRCH)
		error = 0;
	set_user_nice(p, niceval);
out:
	return error;
}

SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	int error = -EINVAL;
	struct pid *pgrp;
	kuid_t uid;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		goto out;

	error = -ESRCH;
	if (niceval < MIN_NICE)
		niceval = MIN_NICE;
	if (niceval > MAX_NICE)
		niceval = MAX_NICE;

	rcu_read_lock();
	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p)
			error = set_one_prio(p, niceval, error);
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			error = set_one_prio(p, niceval, error);
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		uid = make_kuid(cred->user_ns, who);
		user = cred->user;
		if (!who)
			uid = cred->uid;
		else if (!uid_eq(uid, cred->uid) &&
			 !(user = find_user(uid)))
			goto out_unlock;

		do_each_thread(g, p) {
			if (uid_eq(task_uid(p), uid))
				error = set_one_prio(p, niceval, error);
		} while_each_thread(g, p);
		if (!uid_eq(uid, cred->uid))
			free_uid(user);
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
	rcu_read_unlock();
out:
	return error;
}

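/*
 * getpriority - report the highest priority (lowest nice value) among the
 * selected tasks
 *
 * To avoid negative syscall return values the nice value is encoded with
 * nice_to_rlimit(), i.e. returned as 40..1 instead of -20..19.
 */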
SYSCALL_DEFINE2(getpriority, int, which, int, who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	long niceval, retval = -ESRCH;
	struct pid *pgrp;
	kuid_t uid;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		return -EINVAL;

	rcu_read_lock();
	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p) {
			niceval = nice_to_rlimit(task_nice(p));
			if (niceval > retval)
				retval = niceval;
		}
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			niceval = nice_to_rlimit(task_nice(p));
			if (niceval > retval)
				retval = niceval;
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		uid = make_kuid(cred->user_ns, who);
		user = cred->user;
		if (!who)
			uid = cred->uid;
		else if (!uid_eq(uid, cred->uid) &&
			 !(user = find_user(uid)))
			goto out_unlock;

		do_each_thread(g, p) {
			if (uid_eq(task_uid(p), uid)) {
				niceval = nice_to_rlimit(task_nice(p));
				if (niceval > retval)
					retval = niceval;
			}
		} while_each_thread(g, p);
		if (!uid_eq(uid, cred->uid))
			free_uid(user);
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
	rcu_read_unlock();

	return retval;
}

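/*
 * setregid - set the real and/or effective group ID
 *
 * Without CAP_SETGID the real GID may only be set to the current real or
 * effective GID, and the effective GID to the current real, effective or
 * saved GID.
 */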
SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t krgid, kegid;

	krgid = make_kgid(ns, rgid);
	kegid = make_kgid(ns, egid);

	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
		return -EINVAL;
	if ((egid != (gid_t) -1) && !gid_valid(kegid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (rgid != (gid_t) -1) {
		if (gid_eq(old->gid, krgid) ||
		    gid_eq(old->egid, krgid) ||
		    ns_capable(old->user_ns, CAP_SETGID))
			new->gid = krgid;
		else
			goto error;
	}
	if (egid != (gid_t) -1) {
		if (gid_eq(old->gid, kegid) ||
		    gid_eq(old->egid, kegid) ||
		    gid_eq(old->sgid, kegid) ||
		    ns_capable(old->user_ns, CAP_SETGID))
			new->egid = kegid;
		else
			goto error;
	}

	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
		new->sgid = new->egid;
	new->fsgid = new->egid;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

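/*
 * setgid - set the GID; behaves like SYSV with SAVED_IDS
 *
 * With CAP_SETGID all four GIDs are set; otherwise only the effective and
 * filesystem GID change, and only to the current real or saved GID.
 */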
SYSCALL_DEFINE1(setgid, gid_t, gid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t kgid;

	kgid = make_kgid(ns, gid);
	if (!gid_valid(kgid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ns_capable(old->user_ns, CAP_SETGID))
		new->gid = new->egid = new->sgid = new->fsgid = kgid;
	else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
		new->egid = new->fsgid = kgid;
	else
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

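/*
 * set_user - change the user_struct in a credential set to match the new UID
 */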
static int set_user(struct cred *new)
{
	struct user_struct *new_user;

	new_user = alloc_uid(new->uid);
	if (!new_user)
		return -EAGAIN;

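	/*
	 * Exceeding RLIMIT_NPROC is not reported here: the PF_NPROC_EXCEEDED
	 * flag is recorded and the failure deferred to execve() time, so
	 * that set*uid() callers which ignore the return value keep working.
	 */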
	if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
	    new_user != INIT_USER)
		current->flags |= PF_NPROC_EXCEEDED;
	else
		current->flags &= ~PF_NPROC_EXCEEDED;

	free_uid(new->user);
	new->user = new_user;
	return 0;
}

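/*
 * setreuid - set the real and/or effective user ID
 *
 * Without CAP_SETUID the real UID may only be set to the current real or
 * effective UID, and the effective UID to the current real, effective or
 * saved UID.
 */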
SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kruid, keuid;

	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);

	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
		return -EINVAL;
	if ((euid != (uid_t) -1) && !uid_valid(keuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ruid != (uid_t) -1) {
		new->uid = kruid;
		if (!uid_eq(old->uid, kruid) &&
		    !uid_eq(old->euid, kruid) &&
		    !ns_capable(old->user_ns, CAP_SETUID))
			goto error;
	}

	if (euid != (uid_t) -1) {
		new->euid = keuid;
		if (!uid_eq(old->uid, keuid) &&
		    !uid_eq(old->euid, keuid) &&
		    !uid_eq(old->suid, keuid) &&
		    !ns_capable(old->user_ns, CAP_SETUID))
			goto error;
	}

	if (!uid_eq(new->uid, old->uid)) {
		retval = set_user(new);
		if (retval < 0)
			goto error;
	}
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && !uid_eq(keuid, old->uid)))
		new->suid = new->euid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

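/*
 * setuid - set the UID; behaves like SYSV with SAVED_IDS
 *
 * With CAP_SETUID all four UIDs are set; otherwise only the effective and
 * filesystem UID change, and only to the current real or saved UID.
 */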
SYSCALL_DEFINE1(setuid, uid_t, uid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kuid;

	kuid = make_kuid(ns, uid);
	if (!uid_valid(kuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ns_capable(old->user_ns, CAP_SETUID)) {
		new->suid = new->uid = kuid;
		if (!uid_eq(kuid, old->uid)) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	} else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
		goto error;
	}

	new->fsuid = new->euid = kuid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

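/*
 * setresuid - set the real, effective and saved user IDs
 *
 * Without CAP_SETUID each new value must match one of the current real,
 * effective or saved UIDs.  This is also the primitive from which a
 * 4.4BSD-compatible seteuid() can be built.
 */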
SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kruid, keuid, ksuid;

	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);
	ksuid = make_kuid(ns, suid);

	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
		return -EINVAL;

	if ((euid != (uid_t) -1) && !uid_valid(keuid))
		return -EINVAL;

	if ((suid != (uid_t) -1) && !uid_valid(ksuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;

	old = current_cred();

	retval = -EPERM;
	if (!ns_capable(old->user_ns, CAP_SETUID)) {
		if (ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) &&
		    !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
			goto error;
		if (euid != (uid_t) -1 && !uid_eq(keuid, old->uid) &&
		    !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
			goto error;
		if (suid != (uid_t) -1 && !uid_eq(ksuid, old->uid) &&
		    !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
			goto error;
	}

	if (ruid != (uid_t) -1) {
		new->uid = kruid;
		if (!uid_eq(kruid, old->uid)) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	}
	if (euid != (uid_t) -1)
		new->euid = keuid;
	if (suid != (uid_t) -1)
		new->suid = ksuid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp)
{
	const struct cred *cred = current_cred();
	int retval;
	uid_t ruid, euid, suid;

	ruid = from_kuid_munged(cred->user_ns, cred->uid);
	euid = from_kuid_munged(cred->user_ns, cred->euid);
	suid = from_kuid_munged(cred->user_ns, cred->suid);

	if (!(retval = put_user(ruid, ruidp)) &&
	    !(retval = put_user(euid, euidp)))
		retval = put_user(suid, suidp);

	return retval;
}

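/*
 * setresgid - set the real, effective and saved group IDs
 *
 * Same rules as setresuid(), but for GIDs and CAP_SETGID.
 */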
SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t krgid, kegid, ksgid;

	krgid = make_kgid(ns, rgid);
	kegid = make_kgid(ns, egid);
	ksgid = make_kgid(ns, sgid);

	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
		return -EINVAL;
	if ((egid != (gid_t) -1) && !gid_valid(kegid))
		return -EINVAL;
	if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (!ns_capable(old->user_ns, CAP_SETGID)) {
		if (rgid != (gid_t) -1 && !gid_eq(krgid, old->gid) &&
		    !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
			goto error;
		if (egid != (gid_t) -1 && !gid_eq(kegid, old->gid) &&
		    !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
			goto error;
		if (sgid != (gid_t) -1 && !gid_eq(ksgid, old->gid) &&
		    !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
			goto error;
	}

	if (rgid != (gid_t) -1)
		new->gid = krgid;
	if (egid != (gid_t) -1)
		new->egid = kegid;
	if (sgid != (gid_t) -1)
		new->sgid = ksgid;
	new->fsgid = new->egid;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp)
{
	const struct cred *cred = current_cred();
	int retval;
	gid_t rgid, egid, sgid;

	rgid = from_kgid_munged(cred->user_ns, cred->gid);
	egid = from_kgid_munged(cred->user_ns, cred->egid);
	sgid = from_kgid_munged(cred->user_ns, cred->sgid);

	if (!(retval = put_user(rgid, rgidp)) &&
	    !(retval = put_user(egid, egidp)))
		retval = put_user(sgid, sgidp);

	return retval;
}

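/*
 * setfsuid - set the UID used for filesystem access checks
 *
 * Returns the previous fsuid whether or not the change succeeded, so
 * callers cannot distinguish failure from a no-op; this matches the
 * historical behaviour of the call.
 */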
SYSCALL_DEFINE1(setfsuid, uid_t, uid)
{
	const struct cred *old;
	struct cred *new;
	uid_t old_fsuid;
	kuid_t kuid;

	old = current_cred();
	old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);

	kuid = make_kuid(old->user_ns, uid);
	if (!uid_valid(kuid))
		return old_fsuid;

	new = prepare_creds();
	if (!new)
		return old_fsuid;

	if (uid_eq(kuid, old->uid)  || uid_eq(kuid, old->euid)  ||
	    uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
	    ns_capable(old->user_ns, CAP_SETUID)) {
		if (!uid_eq(kuid, old->fsuid)) {
			new->fsuid = kuid;
			if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
				goto change_okay;
		}
	}

	abort_creds(new);
	return old_fsuid;

change_okay:
	commit_creds(new);
	return old_fsuid;
}

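/*
 * setfsgid - set the GID used for filesystem access checks; same
 * return-value semantics as setfsuid().
 */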
SYSCALL_DEFINE1(setfsgid, gid_t, gid)
{
	const struct cred *old;
	struct cred *new;
	gid_t old_fsgid;
	kgid_t kgid;

	old = current_cred();
	old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);

	kgid = make_kgid(old->user_ns, gid);
	if (!gid_valid(kgid))
		return old_fsgid;

	new = prepare_creds();
	if (!new)
		return old_fsgid;

	if (gid_eq(kgid, old->gid)  || gid_eq(kgid, old->egid)  ||
	    gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
	    ns_capable(old->user_ns, CAP_SETGID)) {
		if (!gid_eq(kgid, old->fsgid)) {
			new->fsgid = kgid;
			goto change_okay;
		}
	}

	abort_creds(new);
	return old_fsgid;

change_okay:
	commit_creds(new);
	return old_fsgid;
}

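/*
 * getpid - return the thread group id (the "PID" as seen from user space)
 * of the current process.
 */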
SYSCALL_DEFINE0(getpid)
{
	return task_tgid_vnr(current);
}

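/* gettid - return the id of the calling thread (the kernel-internal pid) */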
SYSCALL_DEFINE0(gettid)
{
	return task_pid_vnr(current);
}

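/*
 * getppid - return the parent's thread group id.  The parent pointer is
 * read under RCU; a momentarily stale value is acceptable here.
 */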
SYSCALL_DEFINE0(getppid)
{
	int pid;

	rcu_read_lock();
	pid = task_tgid_vnr(rcu_dereference(current->real_parent));
	rcu_read_unlock();

	return pid;
}

SYSCALL_DEFINE0(getuid)
{
	return from_kuid_munged(current_user_ns(), current_uid());
}

SYSCALL_DEFINE0(geteuid)
{
	return from_kuid_munged(current_user_ns(), current_euid());
}

SYSCALL_DEFINE0(getgid)
{
	return from_kgid_munged(current_user_ns(), current_gid());
}

SYSCALL_DEFINE0(getegid)
{
	return from_kgid_munged(current_user_ns(), current_egid());
}

void do_sys_times(struct tms *tms)
{
	cputime_t tgutime, tgstime, cutime, cstime;

	spin_lock_irq(&current->sighand->siglock);
	thread_group_cputime_adjusted(current, &tgutime, &tgstime);
	cutime = current->signal->cutime;
	cstime = current->signal->cstime;
	spin_unlock_irq(&current->sighand->siglock);
	tms->tms_utime = cputime_to_clock_t(tgutime);
	tms->tms_stime = cputime_to_clock_t(tgstime);
	tms->tms_cutime = cputime_to_clock_t(cutime);
	tms->tms_cstime = cputime_to_clock_t(cstime);
}

SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
{
	if (tbuf) {
		struct tms tmp;

		do_sys_times(&tmp);
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
}

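/*
 * setpgid - move a process into a (possibly new) process group
 *
 * This needs some heavy checking: the target must be the caller itself or
 * one of its children that has not yet exec'd, it must not be a session
 * leader, and the destination group must exist within the same session.
 */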
SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
{
	struct task_struct *p;
	struct task_struct *group_leader = current->group_leader;
	struct pid *pgrp;
	int err;

	if (!pid)
		pid = task_pid_vnr(group_leader);
	if (!pgid)
		pgid = pid;
	if (pgid < 0)
		return -EINVAL;
	rcu_read_lock();

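	/*
	 * Hold the tasklist lock so the parent/child and session
	 * relationships checked below cannot change under us.
	 */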
	write_lock_irq(&tasklist_lock);

	err = -ESRCH;
	p = find_task_by_vpid(pid);
	if (!p)
		goto out;

	err = -EINVAL;
	if (!thread_group_leader(p))
		goto out;

	if (same_thread_group(p->real_parent, group_leader)) {
		err = -EPERM;
		if (task_session(p) != task_session(group_leader))
			goto out;
		err = -EACCES;
		if (!(p->flags & PF_FORKNOEXEC))
			goto out;
	} else {
		err = -ESRCH;
		if (p != group_leader)
			goto out;
	}

	err = -EPERM;
	if (p->signal->leader)
		goto out;

	pgrp = task_pid(p);
	if (pgid != pid) {
		struct task_struct *g;

		pgrp = find_vpid(pgid);
		g = pid_task(pgrp, PIDTYPE_PGID);
		if (!g || task_session(g) != task_session(group_leader))
			goto out;
	}

	err = security_task_setpgid(p, pgid);
	if (err)
		goto out;

	if (task_pgrp(p) != pgrp)
		change_pid(p, PIDTYPE_PGID, pgrp);

	err = 0;
out:
	write_unlock_irq(&tasklist_lock);
	rcu_read_unlock();
	return err;
}

SYSCALL_DEFINE1(getpgid, pid_t, pid)
{
	struct task_struct *p;
	struct pid *grp;
	int retval;

	rcu_read_lock();
	if (!pid)
		grp = task_pgrp(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		grp = task_pgrp(p);
		if (!grp)
			goto out;

		retval = security_task_getpgid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(grp);
out:
	rcu_read_unlock();
	return retval;
}

#ifdef __ARCH_WANT_SYS_GETPGRP

SYSCALL_DEFINE0(getpgrp)
{
	return sys_getpgid(0);
}

#endif

SYSCALL_DEFINE1(getsid, pid_t, pid)
{
	struct task_struct *p;
	struct pid *sid;
	int retval;

	rcu_read_lock();
	if (!pid)
		sid = task_session(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		sid = task_session(p);
		if (!sid)
			goto out;

		retval = security_task_getsid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(sid);
out:
	rcu_read_unlock();
	return retval;
}

static void set_special_pids(struct pid *pid)
{
	struct task_struct *curr = current->group_leader;

	if (task_session(curr) != pid)
		change_pid(curr, PIDTYPE_SID, pid);

	if (task_pgrp(curr) != pid)
		change_pid(curr, PIDTYPE_PGID, pid);
}

SYSCALL_DEFINE0(setsid)
{
	struct task_struct *group_leader = current->group_leader;
	struct pid *sid = task_pid(group_leader);
	pid_t session = pid_vnr(sid);
	int err = -EPERM;

	write_lock_irq(&tasklist_lock);

	if (group_leader->signal->leader)
		goto out;

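	/* Fail if a process group already exists with the proposed session id. */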
	if (pid_task(sid, PIDTYPE_PGID))
		goto out;

	group_leader->signal->leader = 1;
	set_special_pids(sid);

	proc_clear_tty(group_leader);

	err = session;
out:
	write_unlock_irq(&tasklist_lock);
	if (err > 0) {
		proc_sid_connector(group_leader);
		sched_autogroup_create_attach(group_leader);
	}
	return err;
}

DECLARE_RWSEM(uts_sem);

#ifdef COMPAT_UTS_MACHINE
#define override_architecture(name) \
	(personality(current->personality) == PER_LINUX32 && \
	 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
		      sizeof(COMPAT_UTS_MACHINE)))
#else
#define override_architecture(name)	0
#endif

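/*
 * override_release - fake a 2.6.40+x release string for binaries that run
 * with the UNAME26 personality, so that version checks written for the
 * 2.6 numbering scheme keep working (e.g. 3.0 is reported as 2.6.40).
 */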
static int override_release(char __user *release, size_t len)
{
	int ret = 0;

	if (current->personality & UNAME26) {
		const char *rest = UTS_RELEASE;
		char buf[65] = { 0 };
		int ndots = 0;
		unsigned v;
		size_t copy;

		while (*rest) {
			if (*rest == '.' && ++ndots >= 3)
				break;
			if (!isdigit(*rest) && *rest != '.')
				break;
			rest++;
		}
		v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40;
		copy = clamp_t(size_t, len, 1, sizeof(buf));
		copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
		ret = copy_to_user(release, buf, copy + 1);
	}
	return ret;
}

SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
{
	int errno = 0;

	down_read(&uts_sem);
	if (copy_to_user(name, utsname(), sizeof *name))
		errno = -EFAULT;
	up_read(&uts_sem);

	if (!errno && override_release(name->release, sizeof(name->release)))
		errno = -EFAULT;
	if (!errno && override_architecture(name))
		errno = -EFAULT;
	return errno;
}

#ifdef __ARCH_WANT_SYS_OLD_UNAME

SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
{
	int error = 0;

	if (!name)
		return -EFAULT;

	down_read(&uts_sem);
	if (copy_to_user(name, utsname(), sizeof(*name)))
		error = -EFAULT;
	up_read(&uts_sem);

	if (!error && override_release(name->release, sizeof(name->release)))
		error = -EFAULT;
	if (!error && override_architecture(name))
		error = -EFAULT;
	return error;
}

SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
{
	int error;

	if (!name)
		return -EFAULT;
	if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
		return -EFAULT;

	down_read(&uts_sem);
	error = __copy_to_user(&name->sysname, &utsname()->sysname,
			       __OLD_UTS_LEN);
	error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->nodename, &utsname()->nodename,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->release, &utsname()->release,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->release + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->version, &utsname()->version,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->version + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->machine, &utsname()->machine,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->machine + __OLD_UTS_LEN);
	up_read(&uts_sem);

	if (!error && override_architecture(name))
		error = -EFAULT;
	if (!error && override_release(name->release, sizeof(name->release)))
		error = -EFAULT;
	return error ? -EFAULT : 0;
}
#endif

SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;
	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u = utsname();

		memcpy(u->nodename, tmp, len);
		memset(u->nodename + len, 0, sizeof(u->nodename) - len);
		errno = 0;
		uts_proc_notify(UTS_PROC_HOSTNAME);
	}
	up_write(&uts_sem);
	return errno;
}

#ifdef __ARCH_WANT_SYS_GETHOSTNAME

SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
{
	int i, errno;
	struct new_utsname *u;

	if (len < 0)
		return -EINVAL;
	down_read(&uts_sem);
	u = utsname();
	i = 1 + strlen(u->nodename);
	if (i > len)
		i = len;
	errno = 0;
	if (copy_to_user(name, u->nodename, i))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}

#endif

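/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname().
 */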
SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;

	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u = utsname();

		memcpy(u->domainname, tmp, len);
		memset(u->domainname + len, 0, sizeof(u->domainname) - len);
		errno = 0;
		uts_proc_notify(UTS_PROC_DOMAINNAME);
	}
	up_write(&uts_sem);
	return errno;
}

SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
	struct rlimit value;
	int ret;

	ret = do_prlimit(current, resource, NULL, &value);
	if (!ret)
		ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;

	return ret;
}

#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

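/*
 * Back-compatibility for getrlimit(): old binaries expect the limits
 * clamped to a signed 32-bit range.
 */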
SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
		struct rlimit __user *, rlim)
{
	struct rlimit x;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	task_lock(current->group_leader);
	x = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (x.rlim_cur > 0x7FFFFFFF)
		x.rlim_cur = 0x7FFFFFFF;
	if (x.rlim_max > 0x7FFFFFFF)
		x.rlim_max = 0x7FFFFFFF;
	return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
}

#endif

static inline bool rlim64_is_infinity(__u64 rlim64)
{
#if BITS_PER_LONG < 64
	return rlim64 >= ULONG_MAX;
#else
	return rlim64 == RLIM64_INFINITY;
#endif
}

static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
{
	if (rlim->rlim_cur == RLIM_INFINITY)
		rlim64->rlim_cur = RLIM64_INFINITY;
	else
		rlim64->rlim_cur = rlim->rlim_cur;
	if (rlim->rlim_max == RLIM_INFINITY)
		rlim64->rlim_max = RLIM64_INFINITY;
	else
		rlim64->rlim_max = rlim->rlim_max;
}

static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
{
	if (rlim64_is_infinity(rlim64->rlim_cur))
		rlim->rlim_cur = RLIM_INFINITY;
	else
		rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
	if (rlim64_is_infinity(rlim64->rlim_max))
		rlim->rlim_max = RLIM_INFINITY;
	else
		rlim->rlim_max = (unsigned long)rlim64->rlim_max;
}

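/*
 * do_prlimit - read and/or write one resource limit of @tsk
 *
 * Raising a hard limit requires CAP_SYS_RESOURCE, and every change is
 * vetted by security_task_setrlimit().  tasklist_lock keeps @tsk's signal
 * struct from going away while the limit is read or updated under the
 * group leader's task lock.
 */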
int do_prlimit(struct task_struct *tsk, unsigned int resource,
		struct rlimit *new_rlim, struct rlimit *old_rlim)
{
	struct rlimit *rlim;
	int retval = 0;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	if (new_rlim) {
		if (new_rlim->rlim_cur > new_rlim->rlim_max)
			return -EINVAL;
		if (resource == RLIMIT_NOFILE &&
		    new_rlim->rlim_max > sysctl_nr_open)
			return -EPERM;
	}

	read_lock(&tasklist_lock);
	if (!tsk->sighand) {
		retval = -ESRCH;
		goto out;
	}

	rlim = tsk->signal->rlim + resource;
	task_lock(tsk->group_leader);
	if (new_rlim) {
		if (new_rlim->rlim_max > rlim->rlim_max &&
		    !capable(CAP_SYS_RESOURCE))
			retval = -EPERM;
		if (!retval)
			retval = security_task_setrlimit(tsk->group_leader,
							 resource, new_rlim);
		if (resource == RLIMIT_CPU && new_rlim->rlim_cur == 0) {
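			/*
			 * A zero RLIMIT_CPU means "never set" internally, so
			 * an immediate-expiry request is special-cased to
			 * one second.
			 */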
			new_rlim->rlim_cur = 1;
		}
	}
	if (!retval) {
		if (old_rlim)
			*old_rlim = *rlim;
		if (new_rlim)
			*rlim = *new_rlim;
	}
	task_unlock(tsk->group_leader);

	if (!retval && new_rlim && resource == RLIMIT_CPU &&
	    new_rlim->rlim_cur != RLIM_INFINITY)
		update_rlimit_cpu(tsk, new_rlim->rlim_cur);
out:
	read_unlock(&tasklist_lock);
	return retval;
}

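/*
 * check_prlimit_permission - may current get/set @task's resource limits?
 *
 * Allowed when all of current's real UID/GID match the target's real,
 * effective and saved IDs, or with CAP_SYS_RESOURCE in the target's user
 * namespace.
 */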
static int check_prlimit_permission(struct task_struct *task)
{
	const struct cred *cred = current_cred(), *tcred;

	if (current == task)
		return 0;

	tcred = __task_cred(task);
	if (uid_eq(cred->uid, tcred->euid) &&
	    uid_eq(cred->uid, tcred->suid) &&
	    uid_eq(cred->uid, tcred->uid)  &&
	    gid_eq(cred->gid, tcred->egid) &&
	    gid_eq(cred->gid, tcred->sgid) &&
	    gid_eq(cred->gid, tcred->gid))
		return 0;
	if (ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
		return 0;

	return -EPERM;
}

SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
		const struct rlimit64 __user *, new_rlim,
		struct rlimit64 __user *, old_rlim)
{
	struct rlimit64 old64, new64;
	struct rlimit old, new;
	struct task_struct *tsk;
	int ret;

	if (new_rlim) {
		if (copy_from_user(&new64, new_rlim, sizeof(new64)))
			return -EFAULT;
		rlim64_to_rlim(&new64, &new);
	}

	rcu_read_lock();
	tsk = pid ? find_task_by_vpid(pid) : current;
	if (!tsk) {
		rcu_read_unlock();
		return -ESRCH;
	}
	ret = check_prlimit_permission(tsk);
	if (ret) {
		rcu_read_unlock();
		return ret;
	}
	get_task_struct(tsk);
	rcu_read_unlock();

	ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
			old_rlim ? &old : NULL);

	if (!ret && old_rlim) {
		rlim_to_rlim64(&old, &old64);
		if (copy_to_user(old_rlim, &old64, sizeof(old64)))
			ret = -EFAULT;
	}

	put_task_struct(tsk);
	return ret;
}

SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
	struct rlimit new_rlim;

	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
		return -EFAULT;
	return do_prlimit(current, resource, &new_rlim, NULL);
}

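/*
 * getrusage helpers.  Per-thread counters are accumulated without locking;
 * whole-process statistics are gathered under the sighand lock in
 * k_getrusage().
 */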
static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
{
	r->ru_nvcsw += t->nvcsw;
	r->ru_nivcsw += t->nivcsw;
	r->ru_minflt += t->min_flt;
	r->ru_majflt += t->maj_flt;
	r->ru_inblock += task_io_get_inblock(t);
	r->ru_oublock += task_io_get_oublock(t);
}

static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	cputime_t tgutime, tgstime, utime, stime;
	unsigned long maxrss = 0;

	memset((char *) r, 0, sizeof *r);
	utime = stime = 0;

	if (who == RUSAGE_THREAD) {
		task_cputime_adjusted(current, &utime, &stime);
		accumulate_thread_rusage(p, r);
		maxrss = p->signal->maxrss;
		goto out;
	}

	if (!lock_task_sighand(p, &flags))
		return;

	switch (who) {
	case RUSAGE_BOTH:
	case RUSAGE_CHILDREN:
		utime = p->signal->cutime;
		stime = p->signal->cstime;
		r->ru_nvcsw = p->signal->cnvcsw;
		r->ru_nivcsw = p->signal->cnivcsw;
		r->ru_minflt = p->signal->cmin_flt;
		r->ru_majflt = p->signal->cmaj_flt;
		r->ru_inblock = p->signal->cinblock;
		r->ru_oublock = p->signal->coublock;
		maxrss = p->signal->cmaxrss;

		if (who == RUSAGE_CHILDREN)
			break;

	case RUSAGE_SELF:
		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
		utime += tgutime;
		stime += tgstime;
		r->ru_nvcsw += p->signal->nvcsw;
		r->ru_nivcsw += p->signal->nivcsw;
		r->ru_minflt += p->signal->min_flt;
		r->ru_majflt += p->signal->maj_flt;
		r->ru_inblock += p->signal->inblock;
		r->ru_oublock += p->signal->oublock;
		if (maxrss < p->signal->maxrss)
			maxrss = p->signal->maxrss;
		t = p;
		do {
			accumulate_thread_rusage(t, r);
		} while_each_thread(p, t);
		break;

	default:
		BUG();
	}
	unlock_task_sighand(p, &flags);

out:
	cputime_to_timeval(utime, &r->ru_utime);
	cputime_to_timeval(stime, &r->ru_stime);

	if (who != RUSAGE_CHILDREN) {
		struct mm_struct *mm = get_task_mm(p);

		if (mm) {
			setmax_mm_hiwater_rss(&maxrss, mm);
			mmput(mm);
		}
	}
	r->ru_maxrss = maxrss * (PAGE_SIZE / 1024);
}

int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
{
	struct rusage r;

	k_getrusage(p, who, &r);
	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
{
	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
	    who != RUSAGE_THREAD)
		return -EINVAL;
	return getrusage(current, who, ru);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(getrusage, int, who, struct compat_rusage __user *, ru)
{
	struct rusage r;

	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
	    who != RUSAGE_THREAD)
		return -EINVAL;

	k_getrusage(current, who, &r);
	return put_compat_rusage(&r, ru);
}
#endif

SYSCALL_DEFINE1(umask, int, mask)
{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
	return mask;
}

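/*
 * prctl_set_mm_exe_file - change the /proc/<pid>/exe symlink target
 *
 * The replacement must be an executable regular file on a mount without
 * MNT_NOEXEC, no mapping of the old exe file may remain, and the change
 * is permitted only once per mm (MMF_EXE_FILE_CHANGED).
 */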
static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
{
	struct fd exe;
	struct inode *inode;
	int err;

	exe = fdget(fd);
	if (!exe.file)
		return -EBADF;

	inode = file_inode(exe.file);

	err = -EACCES;
	if (!S_ISREG(inode->i_mode) ||
	    exe.file->f_path.mnt->mnt_flags & MNT_NOEXEC)
		goto exit;

	err = inode_permission(inode, MAY_EXEC);
	if (err)
		goto exit;

	down_write(&mm->mmap_sem);

	err = -EBUSY;
	if (mm->exe_file) {
		struct vm_area_struct *vma;

		for (vma = mm->mmap; vma; vma = vma->vm_next)
			if (vma->vm_file &&
			    path_equal(&vma->vm_file->f_path,
				       &mm->exe_file->f_path))
				goto exit_unlock;
	}

	err = -EPERM;
	if (test_and_set_bit(MMF_EXE_FILE_CHANGED, &mm->flags))
		goto exit_unlock;

	err = 0;
	set_mm_exe_file(mm, exe.file);
exit_unlock:
	up_write(&mm->mmap_sem);

exit:
	fdput(exe);
	return err;
}

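/*
 * prctl_set_mm - update the mm fields reported via /proc (start/end of
 * code, data, brk, stack, arguments and environment)
 *
 * Requires CAP_SYS_RESOURCE; addresses are sanity-checked against existing
 * VMAs and RLIMIT_DATA where applicable.
 */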
static int prctl_set_mm(int opt, unsigned long addr,
			unsigned long arg4, unsigned long arg5)
{
	unsigned long rlim = rlimit(RLIMIT_DATA);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int error;

	if (arg5 || (arg4 && opt != PR_SET_MM_AUXV))
		return -EINVAL;

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	if (opt == PR_SET_MM_EXE_FILE)
		return prctl_set_mm_exe_file(mm, (unsigned int)addr);

	if (addr >= TASK_SIZE || addr < mmap_min_addr)
		return -EINVAL;

	error = -EINVAL;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, addr);

	switch (opt) {
	case PR_SET_MM_START_CODE:
		mm->start_code = addr;
		break;
	case PR_SET_MM_END_CODE:
		mm->end_code = addr;
		break;
	case PR_SET_MM_START_DATA:
		mm->start_data = addr;
		break;
	case PR_SET_MM_END_DATA:
		mm->end_data = addr;
		break;

	case PR_SET_MM_START_BRK:
		if (addr <= mm->end_data)
			goto out;

		if (rlim < RLIM_INFINITY &&
		    (mm->brk - addr) +
		    (mm->end_data - mm->start_data) > rlim)
			goto out;

		mm->start_brk = addr;
		break;

	case PR_SET_MM_BRK:
		if (addr <= mm->end_data)
			goto out;

		if (rlim < RLIM_INFINITY &&
		    (addr - mm->start_brk) +
		    (mm->end_data - mm->start_data) > rlim)
			goto out;

		mm->brk = addr;
		break;

	case PR_SET_MM_START_STACK:
	case PR_SET_MM_ARG_START:
	case PR_SET_MM_ARG_END:
	case PR_SET_MM_ENV_START:
	case PR_SET_MM_ENV_END:
		if (!vma) {
			error = -EFAULT;
			goto out;
		}
		if (opt == PR_SET_MM_START_STACK)
			mm->start_stack = addr;
		else if (opt == PR_SET_MM_ARG_START)
			mm->arg_start = addr;
		else if (opt == PR_SET_MM_ARG_END)
			mm->arg_end = addr;
		else if (opt == PR_SET_MM_ENV_START)
			mm->env_start = addr;
		else if (opt == PR_SET_MM_ENV_END)
			mm->env_end = addr;
		break;

	case PR_SET_MM_AUXV: {
		unsigned long user_auxv[AT_VECTOR_SIZE];

		if (arg4 > sizeof(user_auxv))
			goto out;
		up_read(&mm->mmap_sem);

		if (copy_from_user(user_auxv, (const void __user *)addr, arg4))
			return -EFAULT;

		user_auxv[AT_VECTOR_SIZE - 2] = 0;
		user_auxv[AT_VECTOR_SIZE - 1] = 0;

		BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));

		task_lock(current);
		memcpy(mm->saved_auxv, user_auxv, arg4);
		task_unlock(current);

		return 0;
	}
	default:
		goto out;
	}

	error = 0;
out:
	up_read(&mm->mmap_sem);
	return error;
}

#ifdef CONFIG_CHECKPOINT_RESTORE
static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
{
	return put_user(me->clear_child_tid, tid_addr);
}
#else
static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
{
	return -EINVAL;
}
#endif

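/*
 * prctl - operations on a process or thread
 *
 * The LSM hook security_task_prctl() gets first shot; -ENOSYS from the
 * hook means "not handled there", and the generic options below apply.
 */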
SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
		unsigned long, arg4, unsigned long, arg5)
{
	struct task_struct *me = current;
	unsigned char comm[sizeof(me->comm)];
	long error;

	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
	if (error != -ENOSYS)
		return error;

	error = 0;
	switch (option) {
	case PR_SET_PDEATHSIG:
		if (!valid_signal(arg2)) {
			error = -EINVAL;
			break;
		}
		me->pdeath_signal = arg2;
		break;
	case PR_GET_PDEATHSIG:
		error = put_user(me->pdeath_signal, (int __user *)arg2);
		break;
	case PR_GET_DUMPABLE:
		error = get_dumpable(me->mm);
		break;
	case PR_SET_DUMPABLE:
		if (arg2 != SUID_DUMP_DISABLE && arg2 != SUID_DUMP_USER) {
			error = -EINVAL;
			break;
		}
		set_dumpable(me->mm, arg2);
		break;

	case PR_SET_UNALIGN:
		error = SET_UNALIGN_CTL(me, arg2);
		break;
	case PR_GET_UNALIGN:
		error = GET_UNALIGN_CTL(me, arg2);
		break;
	case PR_SET_FPEMU:
		error = SET_FPEMU_CTL(me, arg2);
		break;
	case PR_GET_FPEMU:
		error = GET_FPEMU_CTL(me, arg2);
		break;
	case PR_SET_FPEXC:
		error = SET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_FPEXC:
		error = GET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_TIMING:
		error = PR_TIMING_STATISTICAL;
		break;
	case PR_SET_TIMING:
		if (arg2 != PR_TIMING_STATISTICAL)
			error = -EINVAL;
		break;
	case PR_SET_NAME:
		comm[sizeof(me->comm) - 1] = 0;
		if (strncpy_from_user(comm, (char __user *)arg2,
				      sizeof(me->comm) - 1) < 0)
			return -EFAULT;
		set_task_comm(me, comm);
		proc_comm_connector(me);
		break;
	case PR_GET_NAME:
		get_task_comm(comm, me);
		if (copy_to_user((char __user *)arg2, comm, sizeof(comm)))
			return -EFAULT;
		break;
	case PR_GET_ENDIAN:
		error = GET_ENDIAN(me, arg2);
		break;
	case PR_SET_ENDIAN:
		error = SET_ENDIAN(me, arg2);
		break;
	case PR_GET_SECCOMP:
		error = prctl_get_seccomp();
		break;
	case PR_SET_SECCOMP:
		error = prctl_set_seccomp(arg2, (char __user *)arg3);
		break;
	case PR_GET_TSC:
		error = GET_TSC_CTL(arg2);
		break;
	case PR_SET_TSC:
		error = SET_TSC_CTL(arg2);
		break;
	case PR_TASK_PERF_EVENTS_DISABLE:
		error = perf_event_task_disable();
		break;
	case PR_TASK_PERF_EVENTS_ENABLE:
		error = perf_event_task_enable();
		break;
	case PR_GET_TIMERSLACK:
		error = current->timer_slack_ns;
		break;
	case PR_SET_TIMERSLACK:
		if (arg2 <= 0)
			current->timer_slack_ns =
					current->default_timer_slack_ns;
		else
			current->timer_slack_ns = arg2;
		break;
	case PR_MCE_KILL:
		if (arg4 | arg5)
			return -EINVAL;
		switch (arg2) {
		case PR_MCE_KILL_CLEAR:
			if (arg3 != 0)
				return -EINVAL;
			current->flags &= ~PF_MCE_PROCESS;
			break;
		case PR_MCE_KILL_SET:
			current->flags |= PF_MCE_PROCESS;
			if (arg3 == PR_MCE_KILL_EARLY)
				current->flags |= PF_MCE_EARLY;
			else if (arg3 == PR_MCE_KILL_LATE)
				current->flags &= ~PF_MCE_EARLY;
			else if (arg3 == PR_MCE_KILL_DEFAULT)
				current->flags &=
						~(PF_MCE_EARLY|PF_MCE_PROCESS);
			else
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
		break;
	case PR_MCE_KILL_GET:
		if (arg2 | arg3 | arg4 | arg5)
			return -EINVAL;
		if (current->flags & PF_MCE_PROCESS)
			error = (current->flags & PF_MCE_EARLY) ?
				PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
		else
			error = PR_MCE_KILL_DEFAULT;
		break;
	case PR_SET_MM:
		error = prctl_set_mm(arg2, arg3, arg4, arg5);
		break;
	case PR_GET_TID_ADDRESS:
		error = prctl_get_tid_address(me, (int __user **)arg2);
		break;
	case PR_SET_CHILD_SUBREAPER:
		me->signal->is_child_subreaper = !!arg2;
		break;
	case PR_GET_CHILD_SUBREAPER:
		error = put_user(me->signal->is_child_subreaper,
				 (int __user *)arg2);
		break;
	case PR_SET_NO_NEW_PRIVS:
		if (arg2 != 1 || arg3 || arg4 || arg5)
			return -EINVAL;

		current->no_new_privs = 1;
		break;
	case PR_GET_NO_NEW_PRIVS:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		return current->no_new_privs ? 1 : 0;
	case PR_GET_THP_DISABLE:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		error = !!(me->mm->def_flags & VM_NOHUGEPAGE);
		break;
	case PR_SET_THP_DISABLE:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		down_write(&me->mm->mmap_sem);
		if (arg2)
			me->mm->def_flags |= VM_NOHUGEPAGE;
		else
			me->mm->def_flags &= ~VM_NOHUGEPAGE;
		up_write(&me->mm->mmap_sem);
		break;
	default:
		error = -EINVAL;
		break;
	}
	return error;
}

SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
		struct getcpu_cache __user *, unused)
{
	int err = 0;
	int cpu = raw_smp_processor_id();

	if (cpup)
		err |= put_user(cpu, cpup);
	if (nodep)
		err |= put_user(cpu_to_node(cpu), nodep);
	return err ? -EFAULT : 0;
}

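/*
 * do_sysinfo - fill in the sysinfo structure
 *
 * If the combined RAM and swap totals fit in an unsigned long when
 * expressed in bytes, the counters are converted to byte units
 * (mem_unit = 1) for compatibility with old user space; otherwise
 * mem_unit is left as reported by si_meminfo()/si_swapinfo().
 */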
static int do_sysinfo(struct sysinfo *info)
{
	unsigned long mem_total, sav_total;
	unsigned int mem_unit, bitcount;
	struct timespec tp;

	memset(info, 0, sizeof(struct sysinfo));

	get_monotonic_boottime(&tp);
	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);

	info->procs = nr_threads;

	si_meminfo(info);
	si_swapinfo(info);

	mem_total = info->totalram + info->totalswap;
	if (mem_total < info->totalram || mem_total < info->totalswap)
		goto out;
	bitcount = 0;
	mem_unit = info->mem_unit;
	while (mem_unit > 1) {
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total)
			goto out;
	}

	info->mem_unit = 1;
	info->totalram <<= bitcount;
	info->freeram <<= bitcount;
	info->sharedram <<= bitcount;
	info->bufferram <<= bitcount;
	info->totalswap <<= bitcount;
	info->freeswap <<= bitcount;
	info->totalhigh <<= bitcount;
	info->freehigh <<= bitcount;

out:
	return 0;
}

SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
{
	struct sysinfo val;

	do_sysinfo(&val);

	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_COMPAT
struct compat_sysinfo {
	s32 uptime;
	u32 loads[3];
	u32 totalram;
	u32 freeram;
	u32 sharedram;
	u32 bufferram;
	u32 totalswap;
	u32 freeswap;
	u16 procs;
	u16 pad;
	u32 totalhigh;
	u32 freehigh;
	u32 mem_unit;
	char _f[20-2*sizeof(u32)-sizeof(int)];
};

COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info)
{
	struct sysinfo s;

	do_sysinfo(&s);

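	/*
	 * If the totals do not fit in 32 bits, scale all values down by
	 * growing mem_unit up to the page size.
	 */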
	if ((s.totalram >> 32) || (s.totalswap >> 32)) {
		int bitcount = 0;

		while (s.mem_unit < PAGE_SIZE) {
			s.mem_unit <<= 1;
			bitcount++;
		}

		s.totalram >>= bitcount;
		s.freeram >>= bitcount;
		s.sharedram >>= bitcount;
		s.bufferram >>= bitcount;
		s.totalswap >>= bitcount;
		s.freeswap >>= bitcount;
		s.totalhigh >>= bitcount;
		s.freehigh >>= bitcount;
	}

	if (!access_ok(VERIFY_WRITE, info, sizeof(struct compat_sysinfo)) ||
	    __put_user(s.uptime, &info->uptime) ||
	    __put_user(s.loads[0], &info->loads[0]) ||
	    __put_user(s.loads[1], &info->loads[1]) ||
	    __put_user(s.loads[2], &info->loads[2]) ||
	    __put_user(s.totalram, &info->totalram) ||
	    __put_user(s.freeram, &info->freeram) ||
	    __put_user(s.sharedram, &info->sharedram) ||
	    __put_user(s.bufferram, &info->bufferram) ||
	    __put_user(s.totalswap, &info->totalswap) ||
	    __put_user(s.freeswap, &info->freeswap) ||
	    __put_user(s.procs, &info->procs) ||
	    __put_user(s.totalhigh, &info->totalhigh) ||
	    __put_user(s.freehigh, &info->freehigh) ||
	    __put_user(s.mem_unit, &info->mem_unit))
		return -EFAULT;

	return 0;
}
#endif