/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		which are allocated to each of cpus to prevent starvation.
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <trace/events/sched.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"

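/* Slab cache backing the struct sigqueue records allocated by __sigqueue_alloc(). */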
static struct kmem_cache *sigqueue_cachep;
43
44static void __user *sig_handler(struct task_struct *t, int sig)
45{
46 return t->sighand->action[sig - 1].sa.sa_handler;
47}
48
49static int sig_handler_ignored(void __user *handler, int sig)
50{
51
52 return handler == SIG_IGN ||
53 (handler == SIG_DFL && sig_kernel_ignore(sig));
54}
55
56static int sig_task_ignored(struct task_struct *t, int sig,
57 int from_ancestor_ns)
58{
59 void __user *handler;
60
61 handler = sig_handler(t, sig);
62
63 if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
64 handler == SIG_DFL && !from_ancestor_ns)
65 return 1;
66
67 return sig_handler_ignored(handler, sig);
68}
69
70static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
71{
72
73
74
75
76
77 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
78 return 0;
79
80 if (!sig_task_ignored(t, sig, from_ancestor_ns))
81 return 0;
82
83
84
85
86 return !tracehook_consider_ignored_signal(t, sig);
87}
88
89
90
91
92
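/*
 * Return non-zero if any signal in *signal is not masked out by *blocked,
 * i.e. the task has something left to service.
 */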
93static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
94{
95 unsigned long ready;
96 long i;
97
98 switch (_NSIG_WORDS) {
99 default:
100 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
101 ready |= signal->sig[i] &~ blocked->sig[i];
102 break;
103
104 case 4: ready = signal->sig[3] &~ blocked->sig[3];
105 ready |= signal->sig[2] &~ blocked->sig[2];
106 ready |= signal->sig[1] &~ blocked->sig[1];
107 ready |= signal->sig[0] &~ blocked->sig[0];
108 break;
109
110 case 2: ready = signal->sig[1] &~ blocked->sig[1];
111 ready |= signal->sig[0] &~ blocked->sig[0];
112 break;
113
114 case 1: ready = signal->sig[0] &~ blocked->sig[0];
115 }
116 return ready != 0;
117}
118
119#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
120
121static int recalc_sigpending_tsk(struct task_struct *t)
122{
123 if (t->signal->group_stop_count > 0 ||
124 PENDING(&t->pending, &t->blocked) ||
125 PENDING(&t->signal->shared_pending, &t->blocked)) {
126 set_tsk_thread_flag(t, TIF_SIGPENDING);
127 return 1;
128 }
129
130
131
132
133
134 return 0;
135}
136
137
138
139
140
141void recalc_sigpending_and_wake(struct task_struct *t)
142{
143 if (recalc_sigpending_tsk(t))
144 signal_wake_up(t, 0);
145}
146
147void recalc_sigpending(void)
148{
149 if (unlikely(tracehook_force_sigpending()))
150 set_thread_flag(TIF_SIGPENDING);
151 else if (!recalc_sigpending_tsk(current) && !freezing(current))
152 clear_thread_flag(TIF_SIGPENDING);
153
154}
155
156
157
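/*
 * Given the pending set and a mask of blocked signals, return the lowest
 * numbered signal that is pending and not blocked, or 0 if there is none.
 */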
158int next_signal(struct sigpending *pending, sigset_t *mask)
159{
160 unsigned long i, *s, *m, x;
161 int sig = 0;
162
163 s = pending->signal.sig;
164 m = mask->sig;
165 switch (_NSIG_WORDS) {
166 default:
167 for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
168 if ((x = *s &~ *m) != 0) {
169 sig = ffz(~x) + i*_NSIG_BPW + 1;
170 break;
171 }
172 break;
173
174 case 2: if ((x = s[0] &~ m[0]) != 0)
175 sig = 1;
176 else if ((x = s[1] &~ m[1]) != 0)
177 sig = _NSIG_BPW + 1;
178 else
179 break;
180 sig += ffz(~x);
181 break;
182
183 case 1: if ((x = *s &~ *m) != 0)
184 sig = ffz(~x) + 1;
185 break;
186 }
187
188 return sig;
189}
190
191
192
193
194
195
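/*
 * Allocate a new signal queue record.  The allocation is charged to the
 * target task's user; unless override_rlimit is set it fails once the
 * number of queued signals exceeds RLIMIT_SIGPENDING.
 */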
196static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
197 int override_rlimit)
198{
199 struct sigqueue *q = NULL;
200 struct user_struct *user;
201
202
203
204
205
206
207
208 user = get_uid(__task_cred(t)->user);
209 atomic_inc(&user->sigpending);
210 if (override_rlimit ||
211 atomic_read(&user->sigpending) <=
212 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
213 q = kmem_cache_alloc(sigqueue_cachep, flags);
214 if (unlikely(q == NULL)) {
215 atomic_dec(&user->sigpending);
216 free_uid(user);
217 } else {
218 INIT_LIST_HEAD(&q->list);
219 q->flags = 0;
220 q->user = user;
221 }
222
223 return q;
224}
225
226static void __sigqueue_free(struct sigqueue *q)
227{
228 if (q->flags & SIGQUEUE_PREALLOC)
229 return;
230 atomic_dec(&q->user->sigpending);
231 free_uid(q->user);
232 kmem_cache_free(sigqueue_cachep, q);
233}
234
235void flush_sigqueue(struct sigpending *queue)
236{
237 struct sigqueue *q;
238
239 sigemptyset(&queue->signal);
240 while (!list_empty(&queue->list)) {
241 q = list_entry(queue->list.next, struct sigqueue , list);
242 list_del_init(&q->list);
243 __sigqueue_free(q);
244 }
245}
246
247
248
249
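/*
 * Flush all pending signals for a task: clear TIF_SIGPENDING and empty
 * both the private and the shared pending queues.  Callers hold
 * ->siglock (flush_signals() below takes it).
 */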
250void __flush_signals(struct task_struct *t)
251{
252 clear_tsk_thread_flag(t, TIF_SIGPENDING);
253 flush_sigqueue(&t->pending);
254 flush_sigqueue(&t->signal->shared_pending);
255}
256
257void flush_signals(struct task_struct *t)
258{
259 unsigned long flags;
260
261 spin_lock_irqsave(&t->sighand->siglock, flags);
262 __flush_signals(t);
263 spin_unlock_irqrestore(&t->sighand->siglock, flags);
264}
265
266static void __flush_itimer_signals(struct sigpending *pending)
267{
268 sigset_t signal, retain;
269 struct sigqueue *q, *n;
270
271 signal = pending->signal;
272 sigemptyset(&retain);
273
274 list_for_each_entry_safe(q, n, &pending->list, list) {
275 int sig = q->info.si_signo;
276
277 if (likely(q->info.si_code != SI_TIMER)) {
278 sigaddset(&retain, sig);
279 } else {
280 sigdelset(&signal, sig);
281 list_del_init(&q->list);
282 __sigqueue_free(q);
283 }
284 }
285
286 sigorsets(&pending->signal, &signal, &retain);
287}
288
289void flush_itimer_signals(void)
290{
291 struct task_struct *tsk = current;
292 unsigned long flags;
293
294 spin_lock_irqsave(&tsk->sighand->siglock, flags);
295 __flush_itimer_signals(&tsk->pending);
296 __flush_itimer_signals(&tsk->signal->shared_pending);
297 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
298}
299
300void ignore_signals(struct task_struct *t)
301{
302 int i;
303
304 for (i = 0; i < _NSIG; ++i)
305 t->sighand->action[i].sa.sa_handler = SIG_IGN;
306
307 flush_signals(t);
308}
309
310
311
312
313
314void
315flush_signal_handlers(struct task_struct *t, int force_default)
316{
317 int i;
318 struct k_sigaction *ka = &t->sighand->action[0];
319 for (i = _NSIG ; i != 0 ; i--) {
320 if (force_default || ka->sa.sa_handler != SIG_IGN)
321 ka->sa.sa_handler = SIG_DFL;
322 ka->sa.sa_flags = 0;
323 sigemptyset(&ka->sa.sa_mask);
324 ka++;
325 }
326}
327
328int unhandled_signal(struct task_struct *tsk, int sig)
329{
330 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
331 if (is_global_init(tsk))
332 return 1;
333 if (handler != SIG_IGN && handler != SIG_DFL)
334 return 0;
335 return !tracehook_consider_fatal_signal(tsk, sig);
336}
337
338
339
340
341
342
343
344
345
346
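/*
 * Install a signal notifier for the current task.  Before a signal in
 * *mask is dequeued, notifier(priv) is consulted (see __dequeue_signal());
 * if it returns zero the signal is left queued for now.
 */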
347void
348block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
349{
350 unsigned long flags;
351
	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
357}
358
359
360
361void
362unblock_all_signals(void)
363{
364 unsigned long flags;
365
	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
371}
372
373static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
374{
375 struct sigqueue *q, *first = NULL;
376
377
378
379
380
381 list_for_each_entry(q, &list->list, list) {
382 if (q->info.si_signo == sig) {
383 if (first)
384 goto still_pending;
385 first = q;
386 }
387 }
388
389 sigdelset(&list->signal, sig);
390
391 if (first) {
392still_pending:
393 list_del_init(&first->list);
394 copy_siginfo(info, &first->info);
395 __sigqueue_free(first);
396 } else {
397
398
399
400
401 info->si_signo = sig;
402 info->si_errno = 0;
403 info->si_code = 0;
404 info->si_pid = 0;
405 info->si_uid = 0;
406 }
407}
408
409static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
410 siginfo_t *info)
411{
412 int sig = next_signal(pending, mask);
413
414 if (sig) {
415 if (current->notifier) {
416 if (sigismember(current->notifier_mask, sig)) {
417 if (!(current->notifier)(current->notifier_data)) {
418 clear_thread_flag(TIF_SIGPENDING);
419 return 0;
420 }
421 }
422 }
423
424 collect_signal(sig, pending, info);
425 }
426
427 return sig;
428}
429
430
431
432
433
434
435
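/*
 * Dequeue a signal and return its number, filling in *info.  Private
 * (per-thread) signals are taken before shared (group-wide) ones.
 * Callers must hold tsk->sighand->siglock.
 */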
436int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
437{
438 int signr;
439
440
441
442
443 signr = __dequeue_signal(&tsk->pending, mask, info);
444 if (!signr) {
445 signr = __dequeue_signal(&tsk->signal->shared_pending,
446 mask, info);
447
448
449
450
451
452
453
454
455
456
457
458
459
460 if (unlikely(signr == SIGALRM)) {
461 struct hrtimer *tmr = &tsk->signal->real_timer;
462
463 if (!hrtimer_is_queued(tmr) &&
464 tsk->signal->it_real_incr.tv64 != 0) {
465 hrtimer_forward(tmr, tmr->base->get_time(),
466 tsk->signal->it_real_incr);
467 hrtimer_restart(tmr);
468 }
469 }
470 }
471
472 recalc_sigpending();
473 if (!signr)
474 return 0;
475
476 if (unlikely(sig_kernel_stop(signr))) {
477
478
479
480
481
482
483
484
485
486
487
488
489 tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
490 }
491 if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
492
493
494
495
496
497
498 spin_unlock(&tsk->sighand->siglock);
499 do_schedule_next_timer(info);
500 spin_lock(&tsk->sighand->siglock);
501 }
502 return signr;
503}
504
505
506
507
508
509
510
511
512
513
514
515
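/*
 * Tell a task that it has a new pending signal: set TIF_SIGPENDING and
 * wake it from an interruptible sleep (also from TASK_WAKEKILL states
 * when resume is set).  If the wakeup fails, kick_process() makes a task
 * running on another CPU re-check its signal state.
 */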
516void signal_wake_up(struct task_struct *t, int resume)
517{
518 unsigned int mask;
519
520 set_tsk_thread_flag(t, TIF_SIGPENDING);
521
522
523
524
525
526
527
528
529 mask = TASK_INTERRUPTIBLE;
530 if (resume)
531 mask |= TASK_WAKEKILL;
532 if (!wake_up_state(t, mask))
533 kick_process(t);
534}
535
536
537
538
539
540
541
542
543
544
545static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
546{
547 struct sigqueue *q, *n;
548 sigset_t m;
549
550 sigandsets(&m, mask, &s->signal);
551 if (sigisemptyset(&m))
552 return 0;
553
554 signandsets(&s->signal, &s->signal, mask);
555 list_for_each_entry_safe(q, n, &s->list, list) {
556 if (sigismember(mask, q->info.si_signo)) {
557 list_del_init(&q->list);
558 __sigqueue_free(q);
559 }
560 }
561 return 1;
562}
563
564
565
566
567
568
569static int rm_from_queue(unsigned long mask, struct sigpending *s)
570{
571 struct sigqueue *q, *n;
572
573 if (!sigtestsetmask(&s->signal, mask))
574 return 0;
575
576 sigdelsetmask(&s->signal, mask);
577 list_for_each_entry_safe(q, n, &s->list, list) {
578 if (q->info.si_signo < SIGRTMIN &&
579 (mask & sigmask(q->info.si_signo))) {
580 list_del_init(&q->list);
581 __sigqueue_free(q);
582 }
583 }
584 return 1;
585}
586
587
588
589
590
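/*
 * Check whether the caller may send sig to task t: the signal number must
 * be valid, and for user-originated signals the sender's uid/euid must
 * match the target's uid/suid (or the sender must have CAP_KILL), with
 * SIGCONT additionally allowed within the same session.  The security
 * hook gets the final say.
 */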
591static int check_kill_permission(int sig, struct siginfo *info,
592 struct task_struct *t)
593{
594 const struct cred *cred = current_cred(), *tcred;
595 struct pid *sid;
596 int error;
597
598 if (!valid_signal(sig))
599 return -EINVAL;
600
601 if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
602 return 0;
603
604 error = audit_signal_info(sig, t);
605 if (error)
606 return error;
607
608 tcred = __task_cred(t);
609 if ((cred->euid ^ tcred->suid) &&
610 (cred->euid ^ tcred->uid) &&
611 (cred->uid ^ tcred->suid) &&
612 (cred->uid ^ tcred->uid) &&
613 !capable(CAP_KILL)) {
614 switch (sig) {
615 case SIGCONT:
616 sid = task_session(t);
617
618
619
620
621 if (!sid || sid == task_session(current))
622 break;
623 default:
624 return -EPERM;
625 }
626 }
627
628 return security_task_kill(t, info, sig, 0);
629}
630
631
632
633
634
635
636
637
638
639
640
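/*
 * Handle the side effects of stop/continue signals before queueing:
 * a stop signal removes any pending SIGCONT, SIGCONT removes pending
 * stop signals and wakes stopped threads, and the CLD_STOPPED /
 * CLD_CONTINUED notification state for the parent is recorded.
 * Returns 0 if the signal should be dropped because it is ignored.
 */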
641static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
642{
643 struct signal_struct *signal = p->signal;
644 struct task_struct *t;
645
646 if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
647
648
649
650 } else if (sig_kernel_stop(sig)) {
651
652
653
654 rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
655 t = p;
656 do {
657 rm_from_queue(sigmask(SIGCONT), &t->pending);
658 } while_each_thread(p, t);
659 } else if (sig == SIGCONT) {
660 unsigned int why;
661
662
663
664
665 rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
666 t = p;
667 do {
668 unsigned int state;
669 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684 state = __TASK_STOPPED;
685 if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
686 set_tsk_thread_flag(t, TIF_SIGPENDING);
687 state |= TASK_INTERRUPTIBLE;
688 }
689 wake_up_state(t, state);
690 } while_each_thread(p, t);
691
692
693
694
695
696
697
698
699
700 why = 0;
701 if (signal->flags & SIGNAL_STOP_STOPPED)
702 why |= SIGNAL_CLD_CONTINUED;
703 else if (signal->group_stop_count)
704 why |= SIGNAL_CLD_STOPPED;
705
706 if (why) {
707
708
709
710
711
712 signal->flags = why | SIGNAL_STOP_CONTINUED;
713 signal->group_stop_count = 0;
714 signal->group_exit_code = 0;
715 } else {
716
717
718
719
720
721 signal->flags &= ~SIGNAL_STOP_DEQUEUED;
722 }
723 }
724
725 return !sig_ignored(p, sig, from_ancestor_ns);
726}
727
728
729
730
731
732
733
734
735
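/*
 * Test whether the thread p is a suitable target for delivering sig: it
 * must not block the signal, must not be exiting, stopped or traced, and
 * should either be running now or have no other signal already pending.
 * SIGKILL is always wanted.
 */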
736static inline int wants_signal(int sig, struct task_struct *p)
737{
738 if (sigismember(&p->blocked, sig))
739 return 0;
740 if (p->flags & PF_EXITING)
741 return 0;
742 if (sig == SIGKILL)
743 return 1;
744 if (task_is_stopped_or_traced(p))
745 return 0;
746 return task_curr(p) || !signal_pending(p);
747}
748
749static void complete_signal(int sig, struct task_struct *p, int group)
750{
751 struct signal_struct *signal = p->signal;
752 struct task_struct *t;
753
754
755
756
757
758
759
760 if (wants_signal(sig, p))
761 t = p;
762 else if (!group || thread_group_empty(p))
763
764
765
766
767 return;
768 else {
769
770
771
772 t = signal->curr_target;
773 while (!wants_signal(sig, t)) {
774 t = next_thread(t);
775 if (t == signal->curr_target)
776
777
778
779
780
781 return;
782 }
783 signal->curr_target = t;
784 }
785
786
787
788
789
790 if (sig_fatal(p, sig) &&
791 !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
792 !sigismember(&t->real_blocked, sig) &&
793 (sig == SIGKILL ||
794 !tracehook_consider_fatal_signal(t, sig))) {
795
796
797
798 if (!sig_kernel_coredump(sig)) {
799
800
801
802
803
804
805 signal->flags = SIGNAL_GROUP_EXIT;
806 signal->group_exit_code = sig;
807 signal->group_stop_count = 0;
808 t = p;
809 do {
810 sigaddset(&t->pending.signal, SIGKILL);
811 signal_wake_up(t, 1);
812 } while_each_thread(p, t);
813 return;
814 }
815 }
816
817
818
819
820
821 signal_wake_up(t, sig == SIGKILL);
822 return;
823}
824
825static inline int legacy_queue(struct sigpending *signals, int sig)
826{
827 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
828}
829
830static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
831 int group, int from_ancestor_ns)
832{
833 struct sigpending *pending;
834 struct sigqueue *q;
835 int override_rlimit;
836
837 trace_sched_signal_send(sig, t);
838
839 assert_spin_locked(&t->sighand->siglock);
840
841 if (!prepare_signal(sig, t, from_ancestor_ns))
842 return 0;
843
844 pending = group ? &t->signal->shared_pending : &t->pending;
845
846
847
848
849
850 if (legacy_queue(pending, sig))
851 return 0;
852
853
854
855
856 if (info == SEND_SIG_FORCED)
857 goto out_set;
858
859
860
861
862
863
864
865
866
867 if (sig < SIGRTMIN)
868 override_rlimit = (is_si_special(info) || info->si_code >= 0);
869 else
870 override_rlimit = 0;
871
872 q = __sigqueue_alloc(t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
873 override_rlimit);
874 if (q) {
875 list_add_tail(&q->list, &pending->list);
876 switch ((unsigned long) info) {
877 case (unsigned long) SEND_SIG_NOINFO:
878 q->info.si_signo = sig;
879 q->info.si_errno = 0;
880 q->info.si_code = SI_USER;
881 q->info.si_pid = task_tgid_nr_ns(current,
882 task_active_pid_ns(t));
883 q->info.si_uid = current_uid();
884 break;
885 case (unsigned long) SEND_SIG_PRIV:
886 q->info.si_signo = sig;
887 q->info.si_errno = 0;
888 q->info.si_code = SI_KERNEL;
889 q->info.si_pid = 0;
890 q->info.si_uid = 0;
891 break;
892 default:
893 copy_siginfo(&q->info, info);
894 if (from_ancestor_ns)
895 q->info.si_pid = 0;
896 break;
897 }
898 } else if (!is_si_special(info)) {
899 if (sig >= SIGRTMIN && info->si_code != SI_USER)
900
901
902
903
904 return -EAGAIN;
905 }
906
907out_set:
908 signalfd_notify(t, sig);
909 sigaddset(&pending->signal, sig);
910 complete_signal(sig, t, group);
911 return 0;
912}
913
914static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
915 int group)
916{
917 int from_ancestor_ns = 0;
918
919#ifdef CONFIG_PID_NS
920 if (!is_si_special(info) && SI_FROMUSER(info) &&
921 task_pid_nr_ns(current, task_active_pid_ns(t)) <= 0)
922 from_ancestor_ns = 1;
923#endif
924
925 return __send_signal(sig, info, t, group, from_ancestor_ns);
926}
927
928int print_fatal_signals;
929
930static void print_fatal_signal(struct pt_regs *regs, int signr)
931{
932 printk("%s/%d: potentially unexpected fatal signal %d.\n",
933 current->comm, task_pid_nr(current), signr);
934
935#if defined(__i386__) && !defined(__arch_um__)
936 printk("code at %08lx: ", regs->ip);
937 {
938 int i;
939 for (i = 0; i < 16; i++) {
940 unsigned char insn;
941
942 __get_user(insn, (unsigned char *)(regs->ip + i));
943 printk("%02x ", insn);
944 }
945 }
946#endif
947 printk("\n");
948 preempt_disable();
949 show_regs(regs);
950 preempt_enable();
951}
952
953static int __init setup_print_fatal_signals(char *str)
954{
955 get_option (&str, &print_fatal_signals);
956
957 return 1;
958}
959
960__setup("print-fatal-signals=", setup_print_fatal_signals);
961
962int
963__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
964{
965 return send_signal(sig, info, p, 1);
966}
967
968static int
969specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
970{
971 return send_signal(sig, info, t, 0);
972}
973
974int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
975 bool group)
976{
977 unsigned long flags;
978 int ret = -ESRCH;
979
980 if (lock_task_sighand(p, &flags)) {
981 ret = send_signal(sig, info, p, group);
982 unlock_task_sighand(p, &flags);
983 }
984
985 return ret;
986}
987
988
989
990
991
992
993
994
995
996
997
998
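/*
 * Force a signal through to t even if it is currently ignored or blocked:
 * the handler is reset to SIG_DFL and the signal is unblocked first, and
 * when the handler is SIG_DFL the SIGNAL_UNKILLABLE flag is also cleared
 * so that the default action is really taken.
 */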
999int
1000force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1001{
1002 unsigned long int flags;
1003 int ret, blocked, ignored;
1004 struct k_sigaction *action;
1005
1006 spin_lock_irqsave(&t->sighand->siglock, flags);
1007 action = &t->sighand->action[sig-1];
1008 ignored = action->sa.sa_handler == SIG_IGN;
1009 blocked = sigismember(&t->blocked, sig);
1010 if (blocked || ignored) {
1011 action->sa.sa_handler = SIG_DFL;
1012 if (blocked) {
1013 sigdelset(&t->blocked, sig);
1014 recalc_sigpending_and_wake(t);
1015 }
1016 }
1017 if (action->sa.sa_handler == SIG_DFL)
1018 t->signal->flags &= ~SIGNAL_UNKILLABLE;
1019 ret = specific_send_sig_info(sig, info, t);
1020 spin_unlock_irqrestore(&t->sighand->siglock, flags);
1021
1022 return ret;
1023}
1024
1025void
1026force_sig_specific(int sig, struct task_struct *t)
1027{
1028 force_sig_info(sig, SEND_SIG_FORCED, t);
1029}
1030
1031
1032
1033
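/*
 * Queue SIGKILL for every other live thread in p's group and wake it;
 * threads that already have an exit_state are skipped.
 */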
1034void zap_other_threads(struct task_struct *p)
1035{
1036 struct task_struct *t;
1037
1038 p->signal->group_stop_count = 0;
1039
1040 for (t = next_thread(p); t != p; t = next_thread(t)) {
1041
1042
1043
1044 if (t->exit_state)
1045 continue;
1046
1047
1048 sigaddset(&t->pending.signal, SIGKILL);
1049 signal_wake_up(t, 1);
1050 }
1051}
1052
1053struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
1054{
1055 struct sighand_struct *sighand;
1056
1057 rcu_read_lock();
1058 for (;;) {
1059 sighand = rcu_dereference(tsk->sighand);
1060 if (unlikely(sighand == NULL))
1061 break;
1062
1063 spin_lock_irqsave(&sighand->siglock, *flags);
1064 if (likely(sighand == tsk->sighand))
1065 break;
1066 spin_unlock_irqrestore(&sighand->siglock, *flags);
1067 }
1068 rcu_read_unlock();
1069
1070 return sighand;
1071}
1072
1073
1074
1075
1076
1077int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1078{
1079 int ret = check_kill_permission(sig, info, p);
1080
1081 if (!ret && sig)
1082 ret = do_send_sig_info(sig, info, p, true);
1083
1084 return ret;
1085}
1086
1087
1088
1089
1090
1091
1092int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1093{
1094 struct task_struct *p = NULL;
1095 int retval, success;
1096
1097 success = 0;
1098 retval = -ESRCH;
1099 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1100 int err = group_send_sig_info(sig, info, p);
1101 success |= !err;
1102 retval = err;
1103 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1104 return success ? 0 : retval;
1105}
1106
1107int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1108{
1109 int error = -ESRCH;
1110 struct task_struct *p;
1111
1112 rcu_read_lock();
1113retry:
1114 p = pid_task(pid, PIDTYPE_PID);
1115 if (p) {
1116 error = group_send_sig_info(sig, info, p);
1117 if (unlikely(error == -ESRCH))
1118
1119
1120
1121
1122
1123
1124 goto retry;
1125 }
1126 rcu_read_unlock();
1127
1128 return error;
1129}
1130
1131int
1132kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1133{
1134 int error;
1135 rcu_read_lock();
1136 error = kill_pid_info(sig, info, find_vpid(pid));
1137 rcu_read_unlock();
1138 return error;
1139}
1140
1141
1142int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
1143 uid_t uid, uid_t euid, u32 secid)
1144{
1145 int ret = -EINVAL;
1146 struct task_struct *p;
1147 const struct cred *pcred;
1148
1149 if (!valid_signal(sig))
1150 return ret;
1151
1152 read_lock(&tasklist_lock);
1153 p = pid_task(pid, PIDTYPE_PID);
1154 if (!p) {
1155 ret = -ESRCH;
1156 goto out_unlock;
1157 }
1158 pcred = __task_cred(p);
1159 if ((info == SEND_SIG_NOINFO ||
1160 (!is_si_special(info) && SI_FROMUSER(info))) &&
1161 euid != pcred->suid && euid != pcred->uid &&
1162 uid != pcred->suid && uid != pcred->uid) {
1163 ret = -EPERM;
1164 goto out_unlock;
1165 }
1166 ret = security_task_kill(p, info, sig, secid);
1167 if (ret)
1168 goto out_unlock;
1169 if (sig && p->sighand) {
1170 unsigned long flags;
1171 spin_lock_irqsave(&p->sighand->siglock, flags);
1172 ret = __send_signal(sig, info, p, 1, 0);
1173 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1174 }
1175out_unlock:
1176 read_unlock(&tasklist_lock);
1177 return ret;
1178}
1179EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
1180
1181
1182
1183
1184
1185
1186
1187
1188static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1189{
1190 int ret;
1191
1192 if (pid > 0) {
1193 rcu_read_lock();
1194 ret = kill_pid_info(sig, info, find_vpid(pid));
1195 rcu_read_unlock();
1196 return ret;
1197 }
1198
1199 read_lock(&tasklist_lock);
1200 if (pid != -1) {
1201 ret = __kill_pgrp_info(sig, info,
1202 pid ? find_vpid(-pid) : task_pgrp(current));
1203 } else {
1204 int retval = 0, count = 0;
1205 struct task_struct * p;
1206
1207 for_each_process(p) {
1208 if (task_pid_vnr(p) > 1 &&
1209 !same_thread_group(p, current)) {
1210 int err = group_send_sig_info(sig, info, p);
1211 ++count;
1212 if (err != -EPERM)
1213 retval = err;
1214 }
1215 }
1216 ret = count ? retval : -ESRCH;
1217 }
1218 read_unlock(&tasklist_lock);
1219
1220 return ret;
1221}
1222
1223
1224
1225
1226
1227int
1228send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1229{
1230
1231
1232
1233
1234 if (!valid_signal(sig))
1235 return -EINVAL;
1236
1237 return do_send_sig_info(sig, info, p, false);
1238}
1239
1240#define __si_special(priv) \
1241 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1242
1243int
1244send_sig(int sig, struct task_struct *p, int priv)
1245{
1246 return send_sig_info(sig, __si_special(priv), p);
1247}
1248
1249void
1250force_sig(int sig, struct task_struct *p)
1251{
1252 force_sig_info(sig, SEND_SIG_PRIV, p);
1253}
1254
1255
1256
1257
1258
1259
1260
1261int
1262force_sigsegv(int sig, struct task_struct *p)
1263{
1264 if (sig == SIGSEGV) {
1265 unsigned long flags;
1266 spin_lock_irqsave(&p->sighand->siglock, flags);
1267 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1268 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1269 }
1270 force_sig(SIGSEGV, p);
1271 return 0;
1272}
1273
1274int kill_pgrp(struct pid *pid, int sig, int priv)
1275{
1276 int ret;
1277
1278 read_lock(&tasklist_lock);
1279 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1280 read_unlock(&tasklist_lock);
1281
1282 return ret;
1283}
1284EXPORT_SYMBOL(kill_pgrp);
1285
1286int kill_pid(struct pid *pid, int sig, int priv)
1287{
1288 return kill_pid_info(sig, __si_special(priv), pid);
1289}
1290EXPORT_SYMBOL(kill_pid);
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302struct sigqueue *sigqueue_alloc(void)
1303{
1304 struct sigqueue *q;
1305
1306 if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
1307 q->flags |= SIGQUEUE_PREALLOC;
1308 return(q);
1309}
1310
1311void sigqueue_free(struct sigqueue *q)
1312{
1313 unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;
1315
1316 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1317
1318
1319
1320
1321
1322 spin_lock_irqsave(lock, flags);
1323 q->flags &= ~SIGQUEUE_PREALLOC;
1324
1325
1326
1327
1328 if (!list_empty(&q->list))
1329 q = NULL;
1330 spin_unlock_irqrestore(lock, flags);
1331
1332 if (q)
1333 __sigqueue_free(q);
1334}
1335
1336int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1337{
1338 int sig = q->info.si_signo;
1339 struct sigpending *pending;
1340 unsigned long flags;
1341 int ret;
1342
1343 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1344
1345 ret = -1;
1346 if (!likely(lock_task_sighand(t, &flags)))
1347 goto ret;
1348
1349 ret = 1;
1350 if (!prepare_signal(sig, t, 0))
1351 goto out;
1352
1353 ret = 0;
1354 if (unlikely(!list_empty(&q->list))) {
1355
1356
1357
1358
1359 BUG_ON(q->info.si_code != SI_TIMER);
1360 q->info.si_overrun++;
1361 goto out;
1362 }
1363 q->info.si_overrun = 0;
1364
1365 signalfd_notify(t, sig);
1366 pending = group ? &t->signal->shared_pending : &t->pending;
1367 list_add_tail(&q->list, &pending->list);
1368 sigaddset(&pending->signal, sig);
1369 complete_signal(sig, t, group);
1370out:
1371 unlock_task_sighand(t, &flags);
1372ret:
1373 return ret;
1374}
1375
1376
1377
1378
1379
1380
1381
1382
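/*
 * Let the parent know that a child has died: build a SIGCHLD-style
 * siginfo (CLD_EXITED, CLD_KILLED or CLD_DUMPED) from the exit code and
 * accumulated CPU times, honour SIG_IGN/SA_NOCLDWAIT by switching the
 * child to self-reaping (exit_signal = -1), and wake a parent sleeping
 * in wait().  Returns the signal actually used, or -1 for self-reaping.
 */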
1383int do_notify_parent(struct task_struct *tsk, int sig)
1384{
1385 struct siginfo info;
1386 unsigned long flags;
1387 struct sighand_struct *psig;
1388 int ret = sig;
1389
1390 BUG_ON(sig == -1);
1391
1392
1393 BUG_ON(task_is_stopped_or_traced(tsk));
1394
1395 BUG_ON(!task_ptrace(tsk) &&
1396 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1397
1398 info.si_signo = sig;
1399 info.si_errno = 0;
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412 rcu_read_lock();
1413 info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
1414 info.si_uid = __task_cred(tsk)->uid;
1415 rcu_read_unlock();
1416
1417 info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
1418 tsk->signal->utime));
1419 info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
1420 tsk->signal->stime));
1421
1422 info.si_status = tsk->exit_code & 0x7f;
1423 if (tsk->exit_code & 0x80)
1424 info.si_code = CLD_DUMPED;
1425 else if (tsk->exit_code & 0x7f)
1426 info.si_code = CLD_KILLED;
1427 else {
1428 info.si_code = CLD_EXITED;
1429 info.si_status = tsk->exit_code >> 8;
1430 }
1431
1432 psig = tsk->parent->sighand;
1433 spin_lock_irqsave(&psig->siglock, flags);
1434 if (!task_ptrace(tsk) && sig == SIGCHLD &&
1435 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1436 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452 ret = tsk->exit_signal = -1;
1453 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1454 sig = -1;
1455 }
1456 if (valid_signal(sig) && sig > 0)
1457 __group_send_sig_info(sig, &info, tsk->parent);
1458 __wake_up_parent(tsk, tsk->parent);
1459 spin_unlock_irqrestore(&psig->siglock, flags);
1460
1461 return ret;
1462}
1463
1464static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
1465{
1466 struct siginfo info;
1467 unsigned long flags;
1468 struct task_struct *parent;
1469 struct sighand_struct *sighand;
1470
1471 if (task_ptrace(tsk))
1472 parent = tsk->parent;
1473 else {
1474 tsk = tsk->group_leader;
1475 parent = tsk->real_parent;
1476 }
1477
1478 info.si_signo = SIGCHLD;
1479 info.si_errno = 0;
1480
1481
1482
1483 rcu_read_lock();
1484 info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
1485 info.si_uid = __task_cred(tsk)->uid;
1486 rcu_read_unlock();
1487
1488 info.si_utime = cputime_to_clock_t(tsk->utime);
1489 info.si_stime = cputime_to_clock_t(tsk->stime);
1490
1491 info.si_code = why;
1492 switch (why) {
1493 case CLD_CONTINUED:
1494 info.si_status = SIGCONT;
1495 break;
1496 case CLD_STOPPED:
1497 info.si_status = tsk->signal->group_exit_code & 0x7f;
1498 break;
1499 case CLD_TRAPPED:
1500 info.si_status = tsk->exit_code & 0x7f;
1501 break;
1502 default:
1503 BUG();
1504 }
1505
1506 sighand = parent->sighand;
1507 spin_lock_irqsave(&sighand->siglock, flags);
1508 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1509 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1510 __group_send_sig_info(SIGCHLD, &info, parent);
1511
1512
1513
1514 __wake_up_parent(tsk, parent);
1515 spin_unlock_irqrestore(&sighand->siglock, flags);
1516}
1517
1518static inline int may_ptrace_stop(void)
1519{
1520 if (!likely(task_ptrace(current)))
1521 return 0;
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531 if (unlikely(current->mm->core_state) &&
1532 unlikely(current->mm == current->parent->mm))
1533 return 0;
1534
1535 return 1;
1536}
1537
1538
1539
1540
1541
1542static int sigkill_pending(struct task_struct *tsk)
1543{
1544 return sigismember(&tsk->pending.signal, SIGKILL) ||
1545 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1546}
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
1560{
1561 if (arch_ptrace_stop_needed(exit_code, info)) {
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
1576 if (sigkill_pending(current))
1577 return;
1578 }
1579
1580
1581
1582
1583
1584 if (current->signal->group_stop_count > 0)
1585 --current->signal->group_stop_count;
1586
1587 current->last_siginfo = info;
1588 current->exit_code = exit_code;
1589
1590
1591 __set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
1593 read_lock(&tasklist_lock);
1594 if (may_ptrace_stop()) {
1595 do_notify_parent_cldstop(current, CLD_TRAPPED);
1596
1597
1598
1599
1600
1601
1602 preempt_disable();
1603 read_unlock(&tasklist_lock);
1604 preempt_enable_no_resched();
1605 schedule();
1606 } else {
1607
1608
1609
1610
1611 __set_current_state(TASK_RUNNING);
1612 if (clear_code)
1613 current->exit_code = 0;
1614 read_unlock(&tasklist_lock);
1615 }
1616
1617
1618
1619
1620
1621
1622 try_to_freeze();
1623
1624
1625
1626
1627
1628
	spin_lock_irq(&current->sighand->siglock);
1630 current->last_siginfo = NULL;
1631
1632
1633
1634
1635
1636
1637 recalc_sigpending_tsk(current);
1638}
1639
1640void ptrace_notify(int exit_code)
1641{
1642 siginfo_t info;
1643
1644 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1645
1646 memset(&info, 0, sizeof info);
1647 info.si_signo = SIGTRAP;
1648 info.si_code = exit_code;
1649 info.si_pid = task_pid_vnr(current);
1650 info.si_uid = current_uid();
1651
1652
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, 1, &info);
	spin_unlock_irq(&current->sighand->siglock);
1656}
1657
1658
1659
1660
1661
1662
1663
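/*
 * Perform a group stop for signr: count and wake the other threads that
 * still need to stop, enter TASK_STOPPED ourselves, report CLD_STOPPED
 * to the parent once the whole group has stopped, and schedule away.
 * Returns non-zero once the stop really happened, 0 if it was raced away.
 */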
1664static int do_signal_stop(int signr)
1665{
1666 struct signal_struct *sig = current->signal;
1667 int notify;
1668
1669 if (!sig->group_stop_count) {
1670 struct task_struct *t;
1671
1672 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
1673 unlikely(signal_group_exit(sig)))
1674 return 0;
1675
1676
1677
1678
1679 sig->group_exit_code = signr;
1680
1681 sig->group_stop_count = 1;
1682 for (t = next_thread(current); t != current; t = next_thread(t))
1683
1684
1685
1686
1687
1688 if (!(t->flags & PF_EXITING) &&
1689 !task_is_stopped_or_traced(t)) {
1690 sig->group_stop_count++;
1691 signal_wake_up(t, 0);
1692 }
1693 }
1694
1695
1696
1697
1698
1699 notify = sig->group_stop_count == 1 ? CLD_STOPPED : 0;
1700 notify = tracehook_notify_jctl(notify, CLD_STOPPED);
1701
1702
1703
1704
1705
1706 if (sig->group_stop_count) {
1707 if (!--sig->group_stop_count)
1708 sig->flags = SIGNAL_STOP_STOPPED;
1709 current->exit_code = sig->group_exit_code;
1710 __set_current_state(TASK_STOPPED);
1711 }
	spin_unlock_irq(&current->sighand->siglock);
1713
1714 if (notify) {
1715 read_lock(&tasklist_lock);
1716 do_notify_parent_cldstop(current, notify);
1717 read_unlock(&tasklist_lock);
1718 }
1719
1720
1721 do {
1722 schedule();
1723 } while (try_to_freeze());
1724
1725 tracehook_finish_jctl();
1726 current->exit_code = 0;
1727
1728 return 1;
1729}
1730
1731static int ptrace_signal(int signr, siginfo_t *info,
1732 struct pt_regs *regs, void *cookie)
1733{
1734 if (!task_ptrace(current))
1735 return signr;
1736
1737 ptrace_signal_deliver(regs, cookie);
1738
1739
1740 ptrace_stop(signr, 0, info);
1741
1742
1743 signr = current->exit_code;
1744 if (signr == 0)
1745 return signr;
1746
1747 current->exit_code = 0;
1748
1749
1750
1751
1752
1753 if (signr != info->si_signo) {
1754 info->si_signo = signr;
1755 info->si_errno = 0;
1756 info->si_code = SI_USER;
1757 info->si_pid = task_pid_vnr(current->parent);
1758 info->si_uid = task_uid(current->parent);
1759 }
1760
1761
	if (sigismember(&current->blocked, signr)) {
1763 specific_send_sig_info(signr, info, current);
1764 signr = 0;
1765 }
1766
1767 return signr;
1768}
1769
1770int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1771 struct pt_regs *regs, void *cookie)
1772{
1773 struct sighand_struct *sighand = current->sighand;
1774 struct signal_struct *signal = current->signal;
1775 int signr;
1776
1777relock:
1778
1779
1780
1781
1782
1783
1784 try_to_freeze();
1785
1786 spin_lock_irq(&sighand->siglock);
1787
1788
1789
1790
1791
1792 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
1793 int why = (signal->flags & SIGNAL_STOP_CONTINUED)
1794 ? CLD_CONTINUED : CLD_STOPPED;
1795 signal->flags &= ~SIGNAL_CLD_MASK;
1796
1797 why = tracehook_notify_jctl(why, CLD_CONTINUED);
1798 spin_unlock_irq(&sighand->siglock);
1799
1800 if (why) {
1801 read_lock(&tasklist_lock);
1802 do_notify_parent_cldstop(current->group_leader, why);
1803 read_unlock(&tasklist_lock);
1804 }
1805 goto relock;
1806 }
1807
1808 for (;;) {
1809 struct k_sigaction *ka;
1810
1811 if (unlikely(signal->group_stop_count > 0) &&
1812 do_signal_stop(0))
1813 goto relock;
1814
1815
1816
1817
1818
1819
1820 signr = tracehook_get_signal(current, regs, info, return_ka);
1821 if (unlikely(signr < 0))
1822 goto relock;
1823 if (unlikely(signr != 0))
1824 ka = return_ka;
1825 else {
			signr = dequeue_signal(current, &current->blocked,
					       info);
1828
1829 if (!signr)
1830 break;
1831
1832 if (signr != SIGKILL) {
1833 signr = ptrace_signal(signr, info,
1834 regs, cookie);
1835 if (!signr)
1836 continue;
1837 }
1838
1839 ka = &sighand->action[signr-1];
1840 }
1841
1842 if (ka->sa.sa_handler == SIG_IGN)
1843 continue;
1844 if (ka->sa.sa_handler != SIG_DFL) {
1845
1846 *return_ka = *ka;
1847
1848 if (ka->sa.sa_flags & SA_ONESHOT)
1849 ka->sa.sa_handler = SIG_DFL;
1850
1851 break;
1852 }
1853
1854
1855
1856
1857 if (sig_kernel_ignore(signr))
1858 continue;
1859
1860
1861
1862
1863
1864
1865
1866
1867
1868
1869
1870 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
1871 !sig_kernel_only(signr))
1872 continue;
1873
1874 if (sig_kernel_stop(signr)) {
1875
1876
1877
1878
1879
1880
1881
1882
1883
1884
1885 if (signr != SIGSTOP) {
1886 spin_unlock_irq(&sighand->siglock);
1887
1888
1889
1890 if (is_current_pgrp_orphaned())
1891 goto relock;
1892
1893 spin_lock_irq(&sighand->siglock);
1894 }
1895
1896 if (likely(do_signal_stop(info->si_signo))) {
1897
1898 goto relock;
1899 }
1900
1901
1902
1903
1904
1905 continue;
1906 }
1907
1908 spin_unlock_irq(&sighand->siglock);
1909
1910
1911
1912
1913 current->flags |= PF_SIGNALED;
1914
1915 if (sig_kernel_coredump(signr)) {
1916 if (print_fatal_signals)
1917 print_fatal_signal(regs, info->si_signo);
1918
1919
1920
1921
1922
1923
1924
1925
1926 do_coredump(info->si_signo, info->si_signo, regs);
1927 }
1928
1929
1930
1931
1932 do_group_exit(info->si_signo);
1933
1934 }
1935 spin_unlock_irq(&sighand->siglock);
1936 return signr;
1937}
1938
1939void exit_signals(struct task_struct *tsk)
1940{
1941 int group_stop = 0;
1942 struct task_struct *t;
1943
1944 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
1945 tsk->flags |= PF_EXITING;
1946 return;
1947 }
1948
1949 spin_lock_irq(&tsk->sighand->siglock);
1950
1951
1952
1953
1954 tsk->flags |= PF_EXITING;
1955 if (!signal_pending(tsk))
1956 goto out;
1957
1958
1959
1960
1961
1962 for (t = tsk; (t = next_thread(t)) != tsk; )
1963 if (!signal_pending(t) && !(t->flags & PF_EXITING))
1964 recalc_sigpending_and_wake(t);
1965
1966 if (unlikely(tsk->signal->group_stop_count) &&
1967 !--tsk->signal->group_stop_count) {
1968 tsk->signal->flags = SIGNAL_STOP_STOPPED;
1969 group_stop = tracehook_notify_jctl(CLD_STOPPED, CLD_STOPPED);
1970 }
1971out:
1972 spin_unlock_irq(&tsk->sighand->siglock);
1973
1974 if (unlikely(group_stop)) {
1975 read_lock(&tasklist_lock);
1976 do_notify_parent_cldstop(tsk, group_stop);
1977 read_unlock(&tasklist_lock);
1978 }
1979}
1980
1981EXPORT_SYMBOL(recalc_sigpending);
1982EXPORT_SYMBOL_GPL(dequeue_signal);
1983EXPORT_SYMBOL(flush_signals);
1984EXPORT_SYMBOL(force_sig);
1985EXPORT_SYMBOL(send_sig);
1986EXPORT_SYMBOL(send_sig_info);
1987EXPORT_SYMBOL(sigprocmask);
1988EXPORT_SYMBOL(block_all_signals);
1989EXPORT_SYMBOL(unblock_all_signals);
1990
1991
1992
1993
1994
1995
1996SYSCALL_DEFINE0(restart_syscall)
1997{
	struct restart_block *restart = &current_thread_info()->restart_block;
1999 return restart->fn(restart);
2000}
2001
2002long do_no_restart_syscall(struct restart_block *param)
2003{
2004 return -EINTR;
2005}
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
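/*
 * In-kernel sigprocmask(): update current->blocked according to how
 * (SIG_BLOCK, SIG_UNBLOCK, SIG_SETMASK), optionally returning the old
 * mask in *oldset, and recompute TIF_SIGPENDING under ->siglock.
 */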
2021int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2022{
2023 int error;
2024
	spin_lock_irq(&current->sighand->siglock);
2026 if (oldset)
2027 *oldset = current->blocked;
2028
2029 error = 0;
2030 switch (how) {
2031 case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
2036 break;
2037 case SIG_SETMASK:
2038 current->blocked = *set;
2039 break;
2040 default:
2041 error = -EINVAL;
2042 }
2043 recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
2045
2046 return error;
2047}
2048
2049SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set,
2050 sigset_t __user *, oset, size_t, sigsetsize)
2051{
2052 int error = -EINVAL;
2053 sigset_t old_set, new_set;
2054
2055
2056 if (sigsetsize != sizeof(sigset_t))
2057 goto out;
2058
2059 if (set) {
2060 error = -EFAULT;
2061 if (copy_from_user(&new_set, set, sizeof(*set)))
2062 goto out;
2063 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2064
2065 error = sigprocmask(how, &new_set, &old_set);
2066 if (error)
2067 goto out;
2068 if (oset)
2069 goto set_old;
2070 } else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);
2074
2075 set_old:
2076 error = -EFAULT;
2077 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2078 goto out;
2079 }
2080 error = 0;
2081out:
2082 return error;
2083}
2084
2085long do_sigpending(void __user *set, unsigned long sigsetsize)
2086{
2087 long error = -EINVAL;
2088 sigset_t pending;
2089
2090 if (sigsetsize > sizeof(sigset_t))
2091 goto out;
2092
	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);
2097
2098
	sigandsets(&pending, &current->blocked, &pending);
2100
2101 error = -EFAULT;
2102 if (!copy_to_user(set, &pending, sigsetsize))
2103 error = 0;
2104
2105out:
2106 return error;
2107}
2108
2109SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
2110{
2111 return do_sigpending(set, sigsetsize);
2112}
2113
2114#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2115
2116int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2117{
2118 int err;
2119
2120 if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2121 return -EFAULT;
2122 if (from->si_code < 0)
2123 return __copy_to_user(to, from, sizeof(siginfo_t))
2124 ? -EFAULT : 0;
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134 err = __put_user(from->si_signo, &to->si_signo);
2135 err |= __put_user(from->si_errno, &to->si_errno);
2136 err |= __put_user((short)from->si_code, &to->si_code);
2137 switch (from->si_code & __SI_MASK) {
2138 case __SI_KILL:
2139 err |= __put_user(from->si_pid, &to->si_pid);
2140 err |= __put_user(from->si_uid, &to->si_uid);
2141 break;
2142 case __SI_TIMER:
2143 err |= __put_user(from->si_tid, &to->si_tid);
2144 err |= __put_user(from->si_overrun, &to->si_overrun);
2145 err |= __put_user(from->si_ptr, &to->si_ptr);
2146 break;
2147 case __SI_POLL:
2148 err |= __put_user(from->si_band, &to->si_band);
2149 err |= __put_user(from->si_fd, &to->si_fd);
2150 break;
2151 case __SI_FAULT:
2152 err |= __put_user(from->si_addr, &to->si_addr);
2153#ifdef __ARCH_SI_TRAPNO
2154 err |= __put_user(from->si_trapno, &to->si_trapno);
2155#endif
2156 break;
2157 case __SI_CHLD:
2158 err |= __put_user(from->si_pid, &to->si_pid);
2159 err |= __put_user(from->si_uid, &to->si_uid);
2160 err |= __put_user(from->si_status, &to->si_status);
2161 err |= __put_user(from->si_utime, &to->si_utime);
2162 err |= __put_user(from->si_stime, &to->si_stime);
2163 break;
2164 case __SI_RT:
2165 case __SI_MESGQ:
2166 err |= __put_user(from->si_pid, &to->si_pid);
2167 err |= __put_user(from->si_uid, &to->si_uid);
2168 err |= __put_user(from->si_ptr, &to->si_ptr);
2169 break;
2170 default:
2171 err |= __put_user(from->si_pid, &to->si_pid);
2172 err |= __put_user(from->si_uid, &to->si_uid);
2173 break;
2174 }
2175 return err;
2176}
2177
2178#endif
2179
2180SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2181 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2182 size_t, sigsetsize)
2183{
2184 int ret, sig;
2185 sigset_t these;
2186 struct timespec ts;
2187 siginfo_t info;
2188 long timeout = 0;
2189
2190
2191 if (sigsetsize != sizeof(sigset_t))
2192 return -EINVAL;
2193
2194 if (copy_from_user(&these, uthese, sizeof(these)))
2195 return -EFAULT;
2196
2197
2198
2199
2200
2201 sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2202 signotset(&these);
2203
2204 if (uts) {
2205 if (copy_from_user(&ts, uts, sizeof(ts)))
2206 return -EFAULT;
2207 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2208 || ts.tv_sec < 0)
2209 return -EINVAL;
2210 }
2211
	spin_lock_irq(&current->sighand->siglock);
2213 sig = dequeue_signal(current, &these, &info);
2214 if (!sig) {
2215 timeout = MAX_SCHEDULE_TIMEOUT;
2216 if (uts)
2217 timeout = (timespec_to_jiffies(&ts)
2218 + (ts.tv_sec || ts.tv_nsec));
2219
2220 if (timeout) {
2221
2222
2223
2224 current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);
2228
2229 timeout = schedule_timeout_interruptible(timeout);
2230
			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
2236 }
2237 }
	spin_unlock_irq(&current->sighand->siglock);
2239
2240 if (sig) {
2241 ret = sig;
2242 if (uinfo) {
2243 if (copy_siginfo_to_user(uinfo, &info))
2244 ret = -EFAULT;
2245 }
2246 } else {
2247 ret = -EAGAIN;
2248 if (timeout)
2249 ret = -EINTR;
2250 }
2251
2252 return ret;
2253}
2254
2255SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2256{
2257 struct siginfo info;
2258
2259 info.si_signo = sig;
2260 info.si_errno = 0;
2261 info.si_code = SI_USER;
2262 info.si_pid = task_tgid_vnr(current);
2263 info.si_uid = current_uid();
2264
2265 return kill_something_info(sig, &info, pid);
2266}
2267
2268static int
2269do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2270{
2271 struct task_struct *p;
2272 int error = -ESRCH;
2273
2274 rcu_read_lock();
2275 p = find_task_by_vpid(pid);
2276 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2277 error = check_kill_permission(sig, info, p);
2278
2279
2280
2281
2282 if (!error && sig) {
2283 error = do_send_sig_info(sig, info, p, false);
2284
2285
2286
2287
2288
2289 if (unlikely(error == -ESRCH))
2290 error = 0;
2291 }
2292 }
2293 rcu_read_unlock();
2294
2295 return error;
2296}
2297
2298static int do_tkill(pid_t tgid, pid_t pid, int sig)
2299{
2300 struct siginfo info;
2301
2302 info.si_signo = sig;
2303 info.si_errno = 0;
2304 info.si_code = SI_TKILL;
2305 info.si_pid = task_tgid_vnr(current);
2306 info.si_uid = current_uid();
2307
2308 return do_send_specific(tgid, pid, sig, &info);
2309}
2310
2311
2312
2313
2314
2315
2316
2317
2318
2319
2320
2321SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
2322{
2323
2324 if (pid <= 0 || tgid <= 0)
2325 return -EINVAL;
2326
2327 return do_tkill(tgid, pid, sig);
2328}
2329
2330
2331
2332
2333SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
2334{
2335
2336 if (pid <= 0)
2337 return -EINVAL;
2338
2339 return do_tkill(0, pid, sig);
2340}
2341
2342SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
2343 siginfo_t __user *, uinfo)
2344{
2345 siginfo_t info;
2346
2347 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2348 return -EFAULT;
2349
2350
2351
2352 if (info.si_code >= 0)
2353 return -EPERM;
2354 info.si_signo = sig;
2355
2356
2357 return kill_proc_info(sig, &info, pid);
2358}
2359
2360long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
2361{
2362
2363 if (pid <= 0 || tgid <= 0)
2364 return -EINVAL;
2365
2366
2367
2368 if (info->si_code >= 0)
2369 return -EPERM;
2370 info->si_signo = sig;
2371
2372 return do_send_specific(tgid, pid, sig, info);
2373}
2374
2375SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
2376 siginfo_t __user *, uinfo)
2377{
2378 siginfo_t info;
2379
2380 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2381 return -EFAULT;
2382
2383 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
2384}
2385
2386int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2387{
2388 struct task_struct *t = current;
2389 struct k_sigaction *k;
2390 sigset_t mask;
2391
2392 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2393 return -EINVAL;
2394
2395 k = &t->sighand->action[sig-1];
2396
	spin_lock_irq(&current->sighand->siglock);
2398 if (oact)
2399 *oact = *k;
2400
2401 if (act) {
2402 sigdelsetmask(&act->sa.sa_mask,
2403 sigmask(SIGKILL) | sigmask(SIGSTOP));
2404 *k = *act;
2405
2406
2407
2408
2409
2410
2411
2412
2413
2414
2415
2416 if (sig_handler_ignored(sig_handler(t, sig), sig)) {
2417 sigemptyset(&mask);
2418 sigaddset(&mask, sig);
2419 rm_from_queue_full(&mask, &t->signal->shared_pending);
2420 do {
2421 rm_from_queue_full(&mask, &t->pending);
2422 t = next_thread(t);
2423 } while (t != current);
2424 }
2425 }
2426
	spin_unlock_irq(&current->sighand->siglock);
2428 return 0;
2429}
2430
2431int
2432do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2433{
2434 stack_t oss;
2435 int error;
2436
2437 oss.ss_sp = (void __user *) current->sas_ss_sp;
2438 oss.ss_size = current->sas_ss_size;
2439 oss.ss_flags = sas_ss_flags(sp);
2440
2441 if (uss) {
2442 void __user *ss_sp;
2443 size_t ss_size;
2444 int ss_flags;
2445
2446 error = -EFAULT;
2447 if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
2448 goto out;
2449 error = __get_user(ss_sp, &uss->ss_sp) |
2450 __get_user(ss_flags, &uss->ss_flags) |
2451 __get_user(ss_size, &uss->ss_size);
2452 if (error)
2453 goto out;
2454
2455 error = -EPERM;
2456 if (on_sig_stack(sp))
2457 goto out;
2458
2459 error = -EINVAL;
2460
2461
2462
2463
2464
2465
2466
2467
2468 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2469 goto out;
2470
2471 if (ss_flags == SS_DISABLE) {
2472 ss_size = 0;
2473 ss_sp = NULL;
2474 } else {
2475 error = -ENOMEM;
2476 if (ss_size < MINSIGSTKSZ)
2477 goto out;
2478 }
2479
2480 current->sas_ss_sp = (unsigned long) ss_sp;
2481 current->sas_ss_size = ss_size;
2482 }
2483
2484 error = 0;
2485 if (uoss) {
2486 error = -EFAULT;
2487 if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
2488 goto out;
2489 error = __put_user(oss.ss_sp, &uoss->ss_sp) |
2490 __put_user(oss.ss_size, &uoss->ss_size) |
2491 __put_user(oss.ss_flags, &uoss->ss_flags);
2492 }
2493
2494out:
2495 return error;
2496}
2497
2498#ifdef __ARCH_WANT_SYS_SIGPENDING
2499
2500SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
2501{
2502 return do_sigpending(set, sizeof(*set));
2503}
2504
2505#endif
2506
2507#ifdef __ARCH_WANT_SYS_SIGPROCMASK
2508
2509
2510
2511SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set,
2512 old_sigset_t __user *, oset)
2513{
2514 int error;
2515 old_sigset_t old_set, new_set;
2516
2517 if (set) {
2518 error = -EFAULT;
2519 if (copy_from_user(&new_set, set, sizeof(*set)))
2520 goto out;
2521 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2522
		spin_lock_irq(&current->sighand->siglock);
2524 old_set = current->blocked.sig[0];
2525
2526 error = 0;
2527 switch (how) {
2528 default:
2529 error = -EINVAL;
2530 break;
2531 case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
2536 break;
2537 case SIG_SETMASK:
2538 current->blocked.sig[0] = new_set;
2539 break;
2540 }
2541
2542 recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
2544 if (error)
2545 goto out;
2546 if (oset)
2547 goto set_old;
2548 } else if (oset) {
2549 old_set = current->blocked.sig[0];
2550 set_old:
2551 error = -EFAULT;
2552 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2553 goto out;
2554 }
2555 error = 0;
2556out:
2557 return error;
2558}
2559#endif
2560
2561#ifdef __ARCH_WANT_SYS_RT_SIGACTION
2562SYSCALL_DEFINE4(rt_sigaction, int, sig,
2563 const struct sigaction __user *, act,
2564 struct sigaction __user *, oact,
2565 size_t, sigsetsize)
2566{
2567 struct k_sigaction new_sa, old_sa;
2568 int ret = -EINVAL;
2569
2570
2571 if (sigsetsize != sizeof(sigset_t))
2572 goto out;
2573
2574 if (act) {
2575 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2576 return -EFAULT;
2577 }
2578
2579 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2580
2581 if (!ret && oact) {
2582 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2583 return -EFAULT;
2584 }
2585out:
2586 return ret;
2587}
2588#endif
2589
2590#ifdef __ARCH_WANT_SYS_SGETMASK
2591
2592
2593
2594
2595SYSCALL_DEFINE0(sgetmask)
2596{
2597
2598 return current->blocked.sig[0];
2599}
2600
2601SYSCALL_DEFINE1(ssetmask, int, newmask)
2602{
2603 int old;
2604
	spin_lock_irq(&current->sighand->siglock);
2606 old = current->blocked.sig[0];
2607
	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
2610 recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
2612
2613 return old;
2614}
2615#endif
2616
2617#ifdef __ARCH_WANT_SYS_SIGNAL
2618
2619
2620
2621SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
2622{
2623 struct k_sigaction new_sa, old_sa;
2624 int ret;
2625
2626 new_sa.sa.sa_handler = handler;
2627 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2628 sigemptyset(&new_sa.sa.sa_mask);
2629
2630 ret = do_sigaction(sig, &new_sa, &old_sa);
2631
2632 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2633}
2634#endif
2635
2636#ifdef __ARCH_WANT_SYS_PAUSE
2637
2638SYSCALL_DEFINE0(pause)
2639{
2640 current->state = TASK_INTERRUPTIBLE;
2641 schedule();
2642 return -ERESTARTNOHAND;
2643}
2644
2645#endif
2646
2647#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2648SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
2649{
2650 sigset_t newset;
2651
2652
2653 if (sigsetsize != sizeof(sigset_t))
2654 return -EINVAL;
2655
2656 if (copy_from_user(&newset, unewset, sizeof(newset)))
2657 return -EFAULT;
2658 sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2659
	spin_lock_irq(&current->sighand->siglock);
2661 current->saved_sigmask = current->blocked;
2662 current->blocked = newset;
2663 recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
2665
2666 current->state = TASK_INTERRUPTIBLE;
2667 schedule();
2668 set_restore_sigmask();
2669 return -ERESTARTNOHAND;
2670}
2671#endif
2672
2673__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
2674{
2675 return NULL;
2676}
2677
2678void __init signals_init(void)
2679{
2680 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
2681}
2682