/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/freezer.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h>
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/tracehook.h>
#include <linux/fs_struct.h>
#include <linux/init_task.h>
#include <linux/perf_event.h>
#include <trace/events/sched.h>
#include <linux/hw_breakpoint.h>
#include <linux/oom.h>
#include <linux/writeback.h>
#include <linux/shm.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>

static void exit_mm(struct task_struct *tsk);

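/*
 * Drop @p from the pid hashes and the thread/task lists; if this was the
 * last thread of the group, also detach the pgrp/session pids and remove
 * the process from the global task list.
 */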
static void __unhash_process(struct task_struct *p, bool group_dead)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	if (group_dead) {
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);

		list_del_rcu(&p->tasks);
		list_del_init(&p->sibling);
		__this_cpu_dec(process_counts);
	}
	list_del_rcu(&p->thread_group);
	list_del_rcu(&p->thread_node);
}

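/*
 * This function expects the tasklist_lock write-locked.
 */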
static void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	bool group_dead = thread_group_leader(tsk);
	struct sighand_struct *sighand;
	struct tty_struct *uninitialized_var(tty);
	cputime_t utime, stime;

	sighand = rcu_dereference_check(tsk->sighand,
					lockdep_tasklist_lock_is_held());
	spin_lock(&sighand->siglock);

	posix_cpu_timers_exit(tsk);
	if (group_dead) {
		posix_cpu_timers_exit_group(tsk);
		tty = sig->tty;
		sig->tty = NULL;
	} else {
		if (unlikely(has_group_leader_pid(tsk)))
			posix_cpu_timers_exit_group(tsk);

		if (sig->notify_count > 0 && !--sig->notify_count)
			wake_up_process(sig->group_exit_task);

		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
	}

	task_cputime(tsk, &utime, &stime);
	write_seqlock(&sig->stats_lock);
	sig->utime += utime;
	sig->stime += stime;
	sig->gtime += task_gtime(tsk);
	sig->min_flt += tsk->min_flt;
	sig->maj_flt += tsk->maj_flt;
	sig->nvcsw += tsk->nvcsw;
	sig->nivcsw += tsk->nivcsw;
	sig->inblock += task_io_get_inblock(tsk);
	sig->oublock += task_io_get_oublock(tsk);
	task_io_accounting_add(&sig->ioac, &tsk->ioac);
	sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
	sig->nr_threads--;
	__unhash_process(tsk, group_dead);
	write_sequnlock(&sig->stats_lock);

	flush_sigqueue(&tsk->pending);
	tsk->sighand = NULL;
	spin_unlock(&sighand->siglock);

	__cleanup_sighand(sighand);
	clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	if (group_dead) {
		flush_sigqueue(&sig->shared_pending);
		tty_kref_put(tty);
	}
}

static void delayed_put_task_struct(struct rcu_head *rhp)
{
	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

	perf_event_delayed_put(tsk);
	trace_sched_process_free(tsk);
	put_task_struct(tsk);
}

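/*
 * Final unhash and notification for an already-dead task: drop its pid
 * links under tasklist_lock, notify a zombie group leader's parent when
 * this was the last non-leader thread, and hand the task_struct to RCU
 * for the final put.  May repeat once for the group leader itself.
 */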
void release_task(struct task_struct *p)
{
	struct task_struct *leader;
	int zap_leader;
repeat:
	rcu_read_lock();
	atomic_dec(&__task_cred(p)->user->processes);
	rcu_read_unlock();

	proc_flush_task(p);

	write_lock_irq(&tasklist_lock);
	ptrace_release_task(p);
	__exit_signal(p);

	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader)
			&& leader->exit_state == EXIT_ZOMBIE) {
		zap_leader = do_notify_parent(leader, leader->exit_signal);
		if (zap_leader)
			leader->exit_state = EXIT_DEAD;
	}

	write_unlock_irq(&tasklist_lock);
	release_thread(p);
	call_rcu(&p->rcu, delayed_put_task_struct);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}

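/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 */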
static int will_become_orphaned_pgrp(struct pid *pgrp,
					struct task_struct *ignored_task)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if ((p == ignored_task) ||
		    (p->exit_state && thread_group_empty(p)) ||
		    is_global_init(p->real_parent))
			continue;

		if (task_pgrp(p->real_parent) != pgrp &&
		    task_session(p->real_parent) == task_session(p))
			return 0;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return 1;
}

int is_current_pgrp_orphaned(void)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
	read_unlock(&tasklist_lock);

	return retval;
}

static bool has_stopped_jobs(struct pid *pgrp)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if (p->signal->flags & SIGNAL_STOP_STOPPED)
			return true;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return false;
}

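/*
 * Check to see if any process groups have become orphaned as
 * a result of our exiting, and if they have any stopped jobs,
 * send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
 */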
static void
kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
{
	struct pid *pgrp = task_pgrp(tsk);
	struct task_struct *ignored_task = tsk;

	if (!parent)
		parent = tsk->real_parent;
	else
		ignored_task = NULL;

	if (task_pgrp(parent) != pgrp &&
	    task_session(parent) == task_session(tsk) &&
	    will_become_orphaned_pgrp(pgrp, ignored_task) &&
	    has_stopped_jobs(pgrp)) {
		__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
		__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
	}
}

#ifdef CONFIG_MEMCG
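/*
 * A task is exiting.  If it owned this mm, find a new owner for the mm.
 */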
void mm_update_next_owner(struct mm_struct *mm)
{
	struct task_struct *c, *g, *p = current;

retry:
	if (mm->owner != p)
		return;

	if (atomic_read(&mm->mm_users) <= 1) {
		mm->owner = NULL;
		return;
	}

	read_lock(&tasklist_lock);

	list_for_each_entry(c, &p->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	list_for_each_entry(c, &p->real_parent->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	for_each_process(g) {
		if (g->flags & PF_KTHREAD)
			continue;
		for_each_thread(g, c) {
			if (c->mm == mm)
				goto assign_new_owner;
			if (c->mm)
				break;
		}
	}
	read_unlock(&tasklist_lock);

	mm->owner = NULL;
	return;

assign_new_owner:
	BUG_ON(c == p);
	get_task_struct(c);
	task_lock(c);
	read_unlock(&tasklist_lock);
	if (c->mm != mm) {
		task_unlock(c);
		put_task_struct(c);
		goto retry;
	}
	mm->owner = c;
	task_unlock(c);
	put_task_struct(c);
}
#endif

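/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */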
static void exit_mm(struct task_struct *tsk)
{
	struct mm_struct *mm = tsk->mm;
	struct core_state *core_state;

	mm_release(tsk, mm);
	if (!mm)
		return;
	sync_mm_rss(mm);

	down_read(&mm->mmap_sem);
	core_state = mm->core_state;
	if (core_state) {
		struct core_thread self;

		up_read(&mm->mmap_sem);

		self.task = tsk;
		self.next = xchg(&core_state->dumper.next, &self);

		if (atomic_dec_and_test(&core_state->nr_threads))
			complete(&core_state->startup);

		for (;;) {
			set_task_state(tsk, TASK_UNINTERRUPTIBLE);
			if (!self.task)
				break;
			freezable_schedule();
		}
		__set_task_state(tsk, TASK_RUNNING);
		down_read(&mm->mmap_sem);
	}
	atomic_inc(&mm->mm_count);
	BUG_ON(mm != tsk->active_mm);

	task_lock(tsk);
	tsk->mm = NULL;
	up_read(&mm->mmap_sem);
	enter_lazy_tlb(mm, current);
	task_unlock(tsk);
	mm_update_next_owner(mm);
	mmput(mm);
	if (test_thread_flag(TIF_MEMDIE))
		exit_oom_victim();
}

static struct task_struct *find_alive_thread(struct task_struct *p)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		if (!(t->flags & PF_EXITING))
			return t;
	}
	return NULL;
}

static struct task_struct *find_child_reaper(struct task_struct *father)
	__releases(&tasklist_lock)
	__acquires(&tasklist_lock)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(father);
	struct task_struct *reaper = pid_ns->child_reaper;

	if (likely(reaper != father))
		return reaper;

	reaper = find_alive_thread(father);
	if (reaper) {
		pid_ns->child_reaper = reaper;
		return reaper;
	}

	write_unlock_irq(&tasklist_lock);
	if (unlikely(pid_ns == &init_pid_ns)) {
		panic("Attempted to kill init! exitcode=0x%08x\n",
			father->signal->group_exit_code ?: father->exit_code);
	}
	zap_pid_ns_processes(pid_ns);
	write_lock_irq(&tasklist_lock);

	return father;
}

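/*
 * When we die, we re-parent all our children, and try to:
 * 1. give them to another thread in our thread group, if such a member exists
 * 2. give it to the first ancestor process which prctl'd itself as a
 *    child_subreaper for its children (like a service manager)
 * 3. give it to the init process (PID 1) in our pid namespace
 */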
static struct task_struct *find_new_reaper(struct task_struct *father,
					   struct task_struct *child_reaper)
{
	struct task_struct *thread, *reaper;

	thread = find_alive_thread(father);
	if (thread)
		return thread;

	if (father->signal->has_child_subreaper) {
		for (reaper = father;
		     !same_thread_group(reaper, child_reaper);
		     reaper = reaper->real_parent) {
			if (reaper == &init_task)
				break;
			if (!reaper->signal->is_child_subreaper)
				continue;
			thread = find_alive_thread(reaper);
			if (thread)
				return thread;
		}
	}

	return child_reaper;
}

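/*
 * Reparent a group leader: if it is already a self-reaping zombie, notify
 * its new parent and queue it on the @dead list for release_task().
 * Also check whether its old process group has just become orphaned.
 */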
static void reparent_leader(struct task_struct *father, struct task_struct *p,
				struct list_head *dead)
{
	if (unlikely(p->exit_state == EXIT_DEAD))
		return;

	p->exit_signal = SIGCHLD;

	if (!p->ptrace &&
	    p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
		if (do_notify_parent(p, p->exit_signal)) {
			p->exit_state = EXIT_DEAD;
			list_add(&p->ptrace_entry, dead);
		}
	}

	kill_orphaned_pgrp(p, father);
}

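/*
 * Reparent all of @father's children (and ptraced tasks) to the chosen
 * reaper, sending pdeath_signal where requested.  Leaders that become
 * reapable are queued on @dead for release_task().
 */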
static void forget_original_parent(struct task_struct *father,
					struct list_head *dead)
{
	struct task_struct *p, *t, *reaper;

	if (unlikely(!list_empty(&father->ptraced)))
		exit_ptrace(father, dead);

	reaper = find_child_reaper(father);
	if (list_empty(&father->children))
		return;

	reaper = find_new_reaper(father, reaper);
	list_for_each_entry(p, &father->children, sibling) {
		for_each_thread(p, t) {
			t->real_parent = reaper;
			BUG_ON((!t->ptrace) != (t->parent == father));
			if (likely(!t->ptrace))
				t->parent = t->real_parent;
			if (t->pdeath_signal)
				group_send_sig_info(t->pdeath_signal,
						    SEND_SIG_NOINFO, t);
		}

		if (!same_thread_group(reaper, father))
			reparent_leader(father, p, dead);
	}
	list_splice_tail_init(&father->children, &reaper->children);
}

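/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */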
static void exit_notify(struct task_struct *tsk, int group_dead)
{
	bool autoreap;
	struct task_struct *p, *n;
	LIST_HEAD(dead);

	write_lock_irq(&tasklist_lock);
	forget_original_parent(tsk, &dead);

	if (group_dead)
		kill_orphaned_pgrp(tsk->group_leader, NULL);

	if (unlikely(tsk->ptrace)) {
		int sig = thread_group_leader(tsk) &&
				thread_group_empty(tsk) &&
				!ptrace_reparented(tsk) ?
			tsk->exit_signal : SIGCHLD;
		autoreap = do_notify_parent(tsk, sig);
	} else if (thread_group_leader(tsk)) {
		autoreap = thread_group_empty(tsk) &&
			do_notify_parent(tsk, tsk->exit_signal);
	} else {
		autoreap = true;
	}

	tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE;
	if (tsk->exit_state == EXIT_DEAD)
		list_add(&tsk->ptrace_entry, &dead);

	if (unlikely(tsk->signal->notify_count < 0))
		wake_up_process(tsk->signal->group_exit_task);
	write_unlock_irq(&tasklist_lock);

	list_for_each_entry_safe(p, n, &dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}
}

#ifdef CONFIG_DEBUG_STACK_USAGE
static void check_stack_usage(void)
{
	static DEFINE_SPINLOCK(low_water_lock);
	static int lowest_to_date = THREAD_SIZE;
	unsigned long free;

	free = stack_not_used(current);

	if (free >= lowest_to_date)
		return;

	spin_lock(&low_water_lock);
	if (free < lowest_to_date) {
		pr_warn("%s (%d) used greatest stack depth: %lu bytes left\n",
			current->comm, task_pid_nr(current), free);
		lowest_to_date = free;
	}
	spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif

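/*
 * Tear down the calling task: release its address space, files, namespaces
 * and other resources, notify the parent, and schedule away for the last
 * time with ->state set to TASK_DEAD.  This function does not return.
 */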
void do_exit(long code)
{
	struct task_struct *tsk = current;
	int group_dead;
	TASKS_RCU(int tasks_rcu_i);

	profile_task_exit(tsk);

	WARN_ON(blk_needs_flush_plug(tsk));

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid))
		panic("Attempted to kill the idle task!");

	set_fs(USER_DS);

	ptrace_event(PTRACE_EVENT_EXIT, code);

	validate_creds_for_do_exit(tsk);

	if (unlikely(tsk->flags & PF_EXITING)) {
		pr_alert("Fixing recursive fault but reboot is needed!\n");
		tsk->flags |= PF_EXITPIDONE;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule();
	}

	exit_signals(tsk);

	smp_mb();
	raw_spin_unlock_wait(&tsk->pi_lock);

	if (unlikely(in_atomic()))
		pr_info("note: %s[%d] exited with preempt_count %d\n",
			current->comm, task_pid_nr(current),
			preempt_count());

	if (tsk->mm)
		sync_mm_rss(tsk->mm);
	acct_update_integrals(tsk);
	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead) {
		hrtimer_cancel(&tsk->signal->real_timer);
		exit_itimers(tsk->signal);
		if (tsk->mm)
			setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
	}
	acct_collect(code, group_dead);
	if (group_dead)
		tty_audit_exit();
	audit_free(tsk);

	tsk->exit_code = code;
	taskstats_exit(tsk, group_dead);

	exit_mm(tsk);

	if (group_dead)
		acct_process();
	trace_sched_process_exit(tsk);

	exit_sem(tsk);
	exit_shm(tsk);
	exit_files(tsk);
	exit_fs(tsk);
	if (group_dead)
		disassociate_ctty(1);
	exit_task_namespaces(tsk);
	exit_task_work(tsk);
	exit_thread();

	perf_event_exit_task(tsk);

	cgroup_exit(tsk);

	flush_ptrace_hw_breakpoint(tsk);

	TASKS_RCU(tasks_rcu_i = __srcu_read_lock(&tasks_rcu_exit_srcu));
	exit_notify(tsk, group_dead);
	proc_exit_connector(tsk);
#ifdef CONFIG_NUMA
	task_lock(tsk);
	mpol_put(tsk->mempolicy);
	tsk->mempolicy = NULL;
	task_unlock(tsk);
#endif
#ifdef CONFIG_FUTEX
	if (unlikely(current->pi_state_cache))
		kfree(current->pi_state_cache);
#endif

	debug_check_no_locks_held();

	tsk->flags |= PF_EXITPIDONE;

	if (tsk->io_context)
		exit_io_context(tsk);

	if (tsk->splice_pipe)
		free_pipe_info(tsk->splice_pipe);

	if (tsk->task_frag.page)
		put_page(tsk->task_frag.page);

	validate_creds_for_do_exit(tsk);

	check_stack_usage();
	preempt_disable();
	if (tsk->nr_dirtied)
		__this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
	exit_rcu();
	TASKS_RCU(__srcu_read_unlock(&tasks_rcu_exit_srcu, tasks_rcu_i));

	smp_mb();
	raw_spin_unlock_wait(&tsk->pi_lock);

	tsk->state = TASK_DEAD;
	tsk->flags |= PF_NOFREEZE;
	schedule();
	BUG();

	for (;;)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(do_exit);

void complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	do_exit(code);
}
EXPORT_SYMBOL(complete_and_exit);

SYSCALL_DEFINE1(exit, int, error_code)
{
	do_exit((error_code&0xff)<<8);
}

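/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */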
void
do_group_exit(int exit_code)
{
	struct signal_struct *sig = current->signal;

	BUG_ON(exit_code & 0x80);

	if (signal_group_exit(sig))
		exit_code = sig->group_exit_code;
	else if (!thread_group_empty(current)) {
		struct sighand_struct *const sighand = current->sighand;

		spin_lock_irq(&sighand->siglock);
		if (signal_group_exit(sig))
			exit_code = sig->group_exit_code;
		else {
			sig->group_exit_code = exit_code;
			sig->flags = SIGNAL_GROUP_EXIT;
			zap_other_threads(current);
		}
		spin_unlock_irq(&sighand->siglock);
	}

	do_exit(exit_code);
}

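/*
 * this kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */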
SYSCALL_DEFINE1(exit_group, int, error_code)
{
	do_group_exit((error_code & 0xff) << 8);

	return 0;
}

struct wait_opts {
	enum pid_type wo_type;
	int wo_flags;
	struct pid *wo_pid;

	struct siginfo __user *wo_info;
	int __user *wo_stat;
	struct rusage __user *wo_rusage;

	wait_queue_t child_wait;
	int notask_error;
};

static inline
struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
	if (type != PIDTYPE_PID)
		task = task->group_leader;
	return task->pids[type].pid;
}

static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
{
	return wo->wo_type == PIDTYPE_MAX ||
		task_pid_type(p, wo->wo_type) == wo->wo_pid;
}

static int eligible_child(struct wait_opts *wo, struct task_struct *p)
{
	if (!eligible_pid(wo, p))
		return 0;

	if (((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
	    && !(wo->wo_flags & __WALL))
		return 0;

	return 1;
}

static int wait_noreap_copyout(struct wait_opts *wo, struct task_struct *p,
				pid_t pid, uid_t uid, int why, int status)
{
	struct siginfo __user *infop;
	int retval = wo->wo_rusage
		? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;

	put_task_struct(p);
	infop = wo->wo_info;
	if (infop) {
		if (!retval)
			retval = put_user(SIGCHLD, &infop->si_signo);
		if (!retval)
			retval = put_user(0, &infop->si_errno);
		if (!retval)
			retval = put_user((short)why, &infop->si_code);
		if (!retval)
			retval = put_user(pid, &infop->si_pid);
		if (!retval)
			retval = put_user(uid, &infop->si_uid);
		if (!retval)
			retval = put_user(status, &infop->si_status);
	}
	if (!retval)
		retval = pid;
	return retval;
}

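/*
 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE.  We hold
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */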
static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
{
	int state, retval, status;
	pid_t pid = task_pid_vnr(p);
	uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
	struct siginfo __user *infop;

	if (!likely(wo->wo_flags & WEXITED))
		return 0;

	if (unlikely(wo->wo_flags & WNOWAIT)) {
		int exit_code = p->exit_code;
		int why;

		get_task_struct(p);
		read_unlock(&tasklist_lock);
		sched_annotate_sleep();

		if ((exit_code & 0x7f) == 0) {
			why = CLD_EXITED;
			status = exit_code >> 8;
		} else {
			why = (exit_code & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status = exit_code & 0x7f;
		}
		return wait_noreap_copyout(wo, p, pid, uid, why, status);
	}

	state = (ptrace_reparented(p) && thread_group_leader(p)) ?
		EXIT_TRACE : EXIT_DEAD;
	if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE)
		return 0;

	read_unlock(&tasklist_lock);
	sched_annotate_sleep();

	if (state == EXIT_DEAD && thread_group_leader(p)) {
		struct signal_struct *sig = p->signal;
		struct signal_struct *psig = current->signal;
		unsigned long maxrss;
		cputime_t tgutime, tgstime;

		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
		spin_lock_irq(&current->sighand->siglock);
		write_seqlock(&psig->stats_lock);
		psig->cutime += tgutime + sig->cutime;
		psig->cstime += tgstime + sig->cstime;
		psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime;
		psig->cmin_flt +=
			p->min_flt + sig->min_flt + sig->cmin_flt;
		psig->cmaj_flt +=
			p->maj_flt + sig->maj_flt + sig->cmaj_flt;
		psig->cnvcsw +=
			p->nvcsw + sig->nvcsw + sig->cnvcsw;
		psig->cnivcsw +=
			p->nivcsw + sig->nivcsw + sig->cnivcsw;
		psig->cinblock +=
			task_io_get_inblock(p) +
			sig->inblock + sig->cinblock;
		psig->coublock +=
			task_io_get_oublock(p) +
			sig->oublock + sig->coublock;
		maxrss = max(sig->maxrss, sig->cmaxrss);
		if (psig->cmaxrss < maxrss)
			psig->cmaxrss = maxrss;
		task_io_accounting_add(&psig->ioac, &p->ioac);
		task_io_accounting_add(&psig->ioac, &sig->ioac);
		write_sequnlock(&psig->stats_lock);
		spin_unlock_irq(&current->sighand->siglock);
	}

	retval = wo->wo_rusage
		? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
	status = (p->signal->flags & SIGNAL_GROUP_EXIT)
		? p->signal->group_exit_code : p->exit_code;
	if (!retval && wo->wo_stat)
		retval = put_user(status, wo->wo_stat);

	infop = wo->wo_info;
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop) {
		int why;

		if ((status & 0x7f) == 0) {
			why = CLD_EXITED;
			status >>= 8;
		} else {
			why = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
			status &= 0x7f;
		}
		retval = put_user((short)why, &infop->si_code);
		if (!retval)
			retval = put_user(status, &infop->si_status);
	}
	if (!retval && infop)
		retval = put_user(pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = pid;

	if (state == EXIT_TRACE) {
		write_lock_irq(&tasklist_lock);
		ptrace_unlink(p);

		state = EXIT_ZOMBIE;
		if (do_notify_parent(p, p->exit_signal))
			state = EXIT_DEAD;
		p->exit_state = state;
		write_unlock_irq(&tasklist_lock);
	}
	if (state == EXIT_DEAD)
		release_task(p);

	return retval;
}

static int *task_stopped_code(struct task_struct *p, bool ptrace)
{
	if (ptrace) {
		if (task_is_stopped_or_traced(p) &&
		    !(p->jobctl & JOBCTL_LISTENING))
			return &p->exit_code;
	} else {
		if (p->signal->flags & SIGNAL_STOP_STOPPED)
			return &p->signal->group_exit_code;
	}
	return NULL;
}

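/*
 * wait_task_stopped - handle sys_wait4() work for %p in state
 * TASK_STOPPED or TASK_TRACED.
 *
 * Called with read_lock(&tasklist_lock) held, which is released if the
 * return value is non-zero.  Also takes and releases @p->sighand->siglock.
 * Returns 0 if this task is uninteresting and the search should continue,
 * otherwise a non-zero value (the pid, or -errno) that do_wait() should
 * return.
 */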
static int wait_task_stopped(struct wait_opts *wo,
				int ptrace, struct task_struct *p)
{
	struct siginfo __user *infop;
	int retval, exit_code, *p_code, why;
	uid_t uid = 0;
	pid_t pid;

	if (!ptrace && !(wo->wo_flags & WUNTRACED))
		return 0;

	if (!task_stopped_code(p, ptrace))
		return 0;

	exit_code = 0;
	spin_lock_irq(&p->sighand->siglock);

	p_code = task_stopped_code(p, ptrace);
	if (unlikely(!p_code))
		goto unlock_sig;

	exit_code = *p_code;
	if (!exit_code)
		goto unlock_sig;

	if (!unlikely(wo->wo_flags & WNOWAIT))
		*p_code = 0;

	uid = from_kuid_munged(current_user_ns(), task_uid(p));
unlock_sig:
	spin_unlock_irq(&p->sighand->siglock);
	if (!exit_code)
		return 0;

	get_task_struct(p);
	pid = task_pid_vnr(p);
	why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
	read_unlock(&tasklist_lock);
	sched_annotate_sleep();

	if (unlikely(wo->wo_flags & WNOWAIT))
		return wait_noreap_copyout(wo, p, pid, uid, why, exit_code);

	retval = wo->wo_rusage
		? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
	if (!retval && wo->wo_stat)
		retval = put_user((exit_code << 8) | 0x7f, wo->wo_stat);

	infop = wo->wo_info;
	if (!retval && infop)
		retval = put_user(SIGCHLD, &infop->si_signo);
	if (!retval && infop)
		retval = put_user(0, &infop->si_errno);
	if (!retval && infop)
		retval = put_user((short)why, &infop->si_code);
	if (!retval && infop)
		retval = put_user(exit_code, &infop->si_status);
	if (!retval && infop)
		retval = put_user(pid, &infop->si_pid);
	if (!retval && infop)
		retval = put_user(uid, &infop->si_uid);
	if (!retval)
		retval = pid;
	put_task_struct(p);

	BUG_ON(!retval);
	return retval;
}

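/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */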
static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
{
	int retval;
	pid_t pid;
	uid_t uid;

	if (!unlikely(wo->wo_flags & WCONTINUED))
		return 0;

	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
		return 0;

	spin_lock_irq(&p->sighand->siglock);
	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
		spin_unlock_irq(&p->sighand->siglock);
		return 0;
	}
	if (!unlikely(wo->wo_flags & WNOWAIT))
		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
	uid = from_kuid_munged(current_user_ns(), task_uid(p));
	spin_unlock_irq(&p->sighand->siglock);

	pid = task_pid_vnr(p);
	get_task_struct(p);
	read_unlock(&tasklist_lock);
	sched_annotate_sleep();

	if (!wo->wo_info) {
		retval = wo->wo_rusage
			? getrusage(p, RUSAGE_BOTH, wo->wo_rusage) : 0;
		put_task_struct(p);
		if (!retval && wo->wo_stat)
			retval = put_user(0xffff, wo->wo_stat);
		if (!retval)
			retval = pid;
	} else {
		retval = wait_noreap_copyout(wo, p, pid, uid,
					     CLD_CONTINUED, SIGCONT);
		BUG_ON(retval == 0);
	}

	return retval;
}

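/*
 * Consider @p for a wait by its parent.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue;
 * then ->notask_error is 0 if @p is an eligible child,
 * or another error from security_task_wait(), or still -ECHILD.
 */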
static int wait_consider_task(struct wait_opts *wo, int ptrace,
				struct task_struct *p)
{
	int exit_state = ACCESS_ONCE(p->exit_state);
	int ret;

	if (unlikely(exit_state == EXIT_DEAD))
		return 0;

	ret = eligible_child(wo, p);
	if (!ret)
		return ret;

	ret = security_task_wait(p);
	if (unlikely(ret < 0)) {
		if (wo->notask_error)
			wo->notask_error = ret;
		return 0;
	}

	if (unlikely(exit_state == EXIT_TRACE)) {
		if (likely(!ptrace))
			wo->notask_error = 0;
		return 0;
	}

	if (likely(!ptrace) && unlikely(p->ptrace)) {
		if (!ptrace_reparented(p))
			ptrace = 1;
	}

	if (exit_state == EXIT_ZOMBIE) {
		if (!delay_group_leader(p)) {
			if (unlikely(ptrace) || likely(!p->ptrace))
				return wait_task_zombie(wo, p);
		}

		if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED)))
			wo->notask_error = 0;
	} else {
		wo->notask_error = 0;
	}

	ret = wait_task_stopped(wo, ptrace, p);
	if (ret)
		return ret;

	return wait_task_continued(wo, p);
}

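/*
 * Do the work of do_wait() for one thread in the group, @tsk:
 * scan its ->children list (and, in ptrace_do_wait() below, its
 * ->ptraced list) and stop at the first task that produces a
 * non-zero result from wait_consider_task().
 */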
static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
{
	struct task_struct *p;

	list_for_each_entry(p, &tsk->children, sibling) {
		int ret = wait_consider_task(wo, 0, p);

		if (ret)
			return ret;
	}

	return 0;
}

static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
{
	struct task_struct *p;

	list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
		int ret = wait_consider_task(wo, 1, p);

		if (ret)
			return ret;
	}

	return 0;
}

static int child_wait_callback(wait_queue_t *wait, unsigned mode,
				int sync, void *key)
{
	struct wait_opts *wo = container_of(wait, struct wait_opts,
						child_wait);
	struct task_struct *p = key;

	if (!eligible_pid(wo, p))
		return 0;

	if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent)
		return 0;

	return default_wake_function(wait, mode, sync, key);
}

void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
{
	__wake_up_sync_key(&parent->signal->wait_chldexit,
				TASK_INTERRUPTIBLE, 1, p);
}

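/*
 * Core of the wait*() family: register on the parent's wait queue,
 * repeatedly scan all threads' child lists until a matching child is
 * found or, without WNOHANG, sleep and retry when a child changes state.
 */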
static long do_wait(struct wait_opts *wo)
{
	struct task_struct *tsk;
	int retval;

	trace_sched_process_wait(wo->wo_pid);

	init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
	wo->child_wait.private = current;
	add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
repeat:
	wo->notask_error = -ECHILD;
	if ((wo->wo_type < PIDTYPE_MAX) &&
	    (!wo->wo_pid || hlist_empty(&wo->wo_pid->tasks[wo->wo_type])))
		goto notask;

	set_current_state(TASK_INTERRUPTIBLE);
	read_lock(&tasklist_lock);
	tsk = current;
	do {
		retval = do_wait_thread(wo, tsk);
		if (retval)
			goto end;

		retval = ptrace_do_wait(wo, tsk);
		if (retval)
			goto end;

		if (wo->wo_flags & __WNOTHREAD)
			break;
	} while_each_thread(current, tsk);
	read_unlock(&tasklist_lock);

notask:
	retval = wo->notask_error;
	if (!retval && !(wo->wo_flags & WNOHANG)) {
		retval = -ERESTARTSYS;
		if (!signal_pending(current)) {
			schedule();
			goto repeat;
		}
	}
end:
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
	return retval;
}

SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
		infop, int, options, struct rusage __user *, ru)
{
	struct wait_opts wo;
	struct pid *pid = NULL;
	enum pid_type type;
	long ret;

	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
		return -EINVAL;
	if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
		return -EINVAL;

	switch (which) {
	case P_ALL:
		type = PIDTYPE_MAX;
		break;
	case P_PID:
		type = PIDTYPE_PID;
		if (upid <= 0)
			return -EINVAL;
		break;
	case P_PGID:
		type = PIDTYPE_PGID;
		if (upid <= 0)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (type < PIDTYPE_MAX)
		pid = find_get_pid(upid);

	wo.wo_type = type;
	wo.wo_pid = pid;
	wo.wo_flags = options;
	wo.wo_info = infop;
	wo.wo_stat = NULL;
	wo.wo_rusage = ru;
	ret = do_wait(&wo);

	if (ret > 0) {
		ret = 0;
	} else if (infop) {
		if (!ret)
			ret = put_user(0, &infop->si_signo);
		if (!ret)
			ret = put_user(0, &infop->si_errno);
		if (!ret)
			ret = put_user(0, &infop->si_code);
		if (!ret)
			ret = put_user(0, &infop->si_pid);
		if (!ret)
			ret = put_user(0, &infop->si_uid);
		if (!ret)
			ret = put_user(0, &infop->si_status);
	}

	put_pid(pid);
	return ret;
}

SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
		int, options, struct rusage __user *, ru)
{
	struct wait_opts wo;
	struct pid *pid = NULL;
	enum pid_type type;
	long ret;

	if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;

	if (upid == -1)
		type = PIDTYPE_MAX;
	else if (upid < 0) {
		type = PIDTYPE_PGID;
		pid = find_get_pid(-upid);
	} else if (upid == 0) {
		type = PIDTYPE_PGID;
		pid = get_task_pid(current, PIDTYPE_PGID);
	} else {
		type = PIDTYPE_PID;
		pid = find_get_pid(upid);
	}

	wo.wo_type = type;
	wo.wo_pid = pid;
	wo.wo_flags = options | WEXITED;
	wo.wo_info = NULL;
	wo.wo_stat = stat_addr;
	wo.wo_rusage = ru;
	ret = do_wait(&wo);
	put_pid(pid);

	return ret;
}

#ifdef __ARCH_WANT_SYS_WAITPID

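/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */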
SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
{
	return sys_wait4(pid, stat_addr, options, NULL);
}

#endif