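/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		which allow sigqueue calls to fail with -EAGAIN when
 *		there is no available structures.
 */
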
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"

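/*
 * SLAB caches for signal bits.
 */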
static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

static int sig_task_ignored(struct task_struct *t, int sig,
		int from_ancestor_ns)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
			handler == SIG_DFL && !from_ancestor_ns)
		return 1;

	return sig_handler_ignored(handler, sig);
}

static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	if (!sig_task_ignored(t, sig, from_ancestor_ns))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals.
	 */
	return !tracehook_consider_ignored_signal(t, sig);
}

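/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */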
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static int recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return 0;
}

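/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */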
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (unlikely(tracehook_force_sigpending()))
		set_thread_flag(TIF_SIGPENDING);
	else if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}

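/* Given the mask, find the first available signal that should be serviced. */
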
#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}

static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
				current->comm, current->pid, sig);
}

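/*
 * Allocate a new signal queue record.
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting.
 */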
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

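/*
 * Flush all pending signals for a task.
 */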
void __flush_signals(struct task_struct *t)
{
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
}

void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	__flush_signals(t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

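/*
 * Flush all handlers for a task.
 */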
void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	return !tracehook_consider_fatal_signal(tsk, sig);
}

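/*
 * Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.
 */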
void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		collect_signal(sig, pending, info);
	}

	return sig;
}

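/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */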
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * consistent with the prior behaviour. Also it should
		 * be compatible with existing POSIX implementations.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmask, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag.
		 */
		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}

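/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */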
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_WAKEKILL;
	if (!wake_up_state(t, mask))
		kick_process(t);
}

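/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */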
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	signandsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

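/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */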
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

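/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */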
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	const struct cred *cred, *tcred;
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	cred = current_cred();
	tcred = __task_cred(t);
	if (!same_thread_group(current, t) &&
	    (cred->euid ^ tcred->suid) &&
	    (cred->euid ^ tcred->uid) &&
	    (cred->uid  ^ tcred->suid) &&
	    (cred->uid  ^ tcred->uid) &&
	    !capable(CAP_KILL)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}

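/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  A stop signal
 * flushes pending SIGCONTs and vice versa, and SIGCONT resumes all
 * stopped threads in the group.
 */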
static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
		} while_each_thread(p, t);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = __TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);
		} while_each_thread(p, t);

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal->flags = why | SIGNAL_STOP_CONTINUED;
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			signal->flags &= ~SIGNAL_STOP_DEQUEUED;
		}
	}

	return !sig_ignored(p, sig, from_ancestor_ns);
}

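/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */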
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL ||
	     !tracehook_consider_fatal_signal(t, sig))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;

	trace_signal_generate(sig, info, t);

	assert_spin_locked(&t->sighand->siglock);

	if (!prepare_signal(sig, t, from_ancestor_ns))
		return 0;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	if (legacy_queue(pending, sig))
		return 0;

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = current_uid();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			trace_signal_overflow_fail(sig, group, info);
			return -EAGAIN;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			trace_signal_lose_info(sig, group, info);
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	return 0;
}

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}

static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, task_pid_nr(current), signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			printk("%02x ", insn);
		}
	}
#endif
	printk("\n");
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}

int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

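/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */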
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

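/*
 * Nuke all other threads in the group.
 */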
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;
}

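/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */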
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	rcu_read_lock();
retry:
	p = pid_task(pid, PIDTYPE_PID);
	if (p) {
		error = group_send_sig_info(sig, info, p);
		if (unlikely(error == -ESRCH))
			/*
			 * The task was unhashed in between, try again.
			 * If it is dead, pid_task() will return NULL,
			 * if we race with de_thread() it will find the
			 * new leader.
			 */
			goto retry;
	}
	rcu_read_unlock();

	return error;
}

int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
		      uid_t uid, uid_t euid, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	const struct cred *pcred;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	pcred = __task_cred(p);
	if (si_fromuser(info) &&
	    euid != pcred->suid && euid != pcred->uid &&
	    uid  != pcred->suid && uid  != pcred->uid) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, 1, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);

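/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */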
static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, false);
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

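/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */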
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

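/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the
 * application with an EAGAIN error.
 */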
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	if (!prepare_signal(sig, t, 0))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
out:
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}

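/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns -1 if our parent ignored us and so we've switched to
 * self-reaping, or else @sig.
 */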
int do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	int ret = sig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!task_ptrace(tsk) &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot exit and release its namespace.  Report our
	 * pid as seen from the parent's pid namespace.
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	info.si_uid = __task_cred(tsk)->uid;
	rcu_read_unlock();

	info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
				tsk->signal->utime));
	info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
				tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!task_ptrace(tsk) && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		ret = tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = -1;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return ret;
}

static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	if (task_ptrace(tsk))
		parent = tsk->parent;
	else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
	info.si_uid = __task_cred(tsk)->uid;
	rcu_read_unlock();

	info.si_utime = cputime_to_clock_t(tsk->utime);
	info.si_stime = cputime_to_clock_t(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline int may_ptrace_stop(void)
{
	if (!likely(task_ptrace(current)))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}

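/*
 * Return nonzero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */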
static int sigkill_pending(struct task_struct *tsk)
{
	return	sigismember(&tsk->pending.signal, SIGKILL) ||
		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}

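/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */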
static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	__set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		do_notify_parent_cldstop(current, CLD_TRAPPED);
		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * While in TASK_TRACED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}

void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = current_uid();

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, 1, &info);
	spin_unlock_irq(&current->sighand->siglock);
}

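/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */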
static int do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	int notify;

	if (!sig->group_stop_count) {
		struct task_struct *t;

		if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return 0;
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now.
		 */
		sig->group_exit_code = signr;

		sig->group_stop_count = 1;
		for (t = next_thread(current); t != current; t = next_thread(t))
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!(t->flags & PF_EXITING) &&
			    !task_is_stopped_or_traced(t)) {
				sig->group_stop_count++;
				signal_wake_up(t, 0);
			}
	}
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop, report
	 * to the parent.  When ptraced, every thread reports itself.
	 */
	notify = sig->group_stop_count == 1 ? CLD_STOPPED : 0;
	notify = tracehook_notify_jctl(notify, CLD_STOPPED);
	/*
	 * tracehook_notify_jctl() can drop and reacquire siglock, so
	 * we keep ->group_stop_count != 0 before the call. If SIGCONT
	 * or SIGKILL comes in between ->group_stop_count == 0.
	 */
	if (sig->group_stop_count) {
		if (!--sig->group_stop_count)
			sig->flags = SIGNAL_STOP_STOPPED;
		current->exit_code = sig->group_exit_code;
		__set_current_state(TASK_STOPPED);
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (notify) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, notify);
		read_unlock(&tasklist_lock);
	}

	/* Now we don't run again until woken by SIGCONT or SIGKILL */
	do {
		schedule();
	} while (try_to_freeze());

	tracehook_finish_jctl();
	current->exit_code = 0;

	return 1;
}

static int ptrace_signal(int signr, siginfo_t *info,
			 struct pt_regs *regs, void *cookie)
{
	if (!task_ptrace(current))
		return signr;

	ptrace_signal_deliver(regs, cookie);

	/* Let the debugger run.  */
	ptrace_stop(signr, 0, info);

	/* We're back.  Did the debugger cancel the sig?  */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/* Update the siginfo structure if the signal has
	   changed.  If the debugger wanted something
	   specific in the siginfo structure then it should
	   have updated *info via PTRACE_SETSIGINFO.  */
	if (signr != info->si_signo) {
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = task_uid(current->parent);
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
}

int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

relock:
	/*
	 * We'll jump back here after any time we were stopped in TASK_STOPPED.
	 * While in TASK_STOPPED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	spin_lock_irq(&sighand->siglock);
	/*
	 * Every stopped thread goes here after wakeup. Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why = (signal->flags & SIGNAL_STOP_CONTINUED)
				? CLD_CONTINUED : CLD_STOPPED;
		signal->flags &= ~SIGNAL_CLD_MASK;

		why = tracehook_notify_jctl(why, CLD_CONTINUED);
		spin_unlock_irq(&sighand->siglock);

		if (why) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current->group_leader, why);
			read_unlock(&tasklist_lock);
		}
		goto relock;
	}

	for (;;) {
		struct k_sigaction *ka;
		/*
		 * Tracing can induce an artificial signal and choose sigaction.
		 * The return value in @signr determines the default action,
		 * but @info->si_signo is the signal number we will report.
		 */
		signr = tracehook_get_signal(current, regs, info, return_ka);
		if (unlikely(signr < 0))
			goto relock;
		if (unlikely(signr != 0))
			ka = return_ka;
		else {
			if (unlikely(signal->group_stop_count > 0) &&
			    do_signal_stop(0))
				goto relock;

			signr = dequeue_signal(current, &current->blocked,
					       info);

			if (!signr)
				break; /* will return 0 */

			if (signr != SIGKILL) {
				signr = ptrace_signal(signr, info,
						      regs, cookie);
				if (!signr)
					continue;
			}

			ka = &sighand->action[signr-1];
		}

		/* Trace actually delivered signals. */
		trace_signal_deliver(signr, info, ka);

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 * Container-init gets no signals it doesn't want from same
		 * container.
		 *
		 * Note that if global/container-init sees a sig_kernel_only()
		 * signal here, the signal must have been generated internally
		 * or must have come from an ancestor namespace. In either
		 * case, the signal cannot be dropped.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
				!sig_kernel_only(signr))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(info->si_signo))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(regs, info->si_signo);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * them so they can't run and do anything after
			 * the coredump; we fall through to the group exit.
			 */
			do_coredump(info->si_signo, info->si_signo, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(info->si_signo);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);
	return signr;
}

void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	struct task_struct *t;

	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		tsk->flags |= PF_EXITING;
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;
	if (!signal_pending(tsk))
		goto out;

	/*
	 * It could be that __group_complete_signal() chose us to
	 * notify about group-wide signal. Another thread should be
	 * woken now to take the signal since we will not.
	 */
	for (t = tsk; (t = next_thread(t)) != tsk; )
		if (!signal_pending(t) && !(t->flags & PF_EXITING))
			recalc_sigpending_and_wake(t);

	if (unlikely(tsk->signal->group_stop_count) &&
			!--tsk->signal->group_stop_count) {
		tsk->signal->flags = SIGNAL_STOP_STOPPED;
		group_stop = tracehook_notify_jctl(CLD_STOPPED, CLD_STOPPED);
	}
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, group_stop);
		read_unlock(&tasklist_lock);
	}
}

EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);

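/*
 * System call entry points.
 */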
SYSCALL_DEFINE0(restart_syscall)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}

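/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unkillable" signals like SIGKILL
 * and friends.
 */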
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;

	spin_lock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = current->blocked;

	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return error;
}

SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set,
		sigset_t __user *, oset, size_t, sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}

long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;

out:
	return error;
}

SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}

#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * Please remember to update the signalfd_copyinfo() function
	 * inside fs/signalfd.c too, in case siginfo_t changes.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
#ifdef BUS_MCEERR_AO
		/*
		 * Other callers might not initialize the si_lsb field,
		 * so check explicitly for the right codes here.
		 */
		if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif

SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
		size_t, sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));

		if (timeout) {
			/*
			 * None ready -- temporarily unblock those we're
			 * interested in while we are sleeping, so that
			 * we'll be awakened when they arrive.
			 */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			timeout = schedule_timeout_interruptible(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}

SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = current_uid();

	return kill_something_info(sig, &info, pid);
}

static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
{
	struct task_struct *p;
	int error = -ESRCH;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig) {
			error = do_send_sig_info(sig, info, p, false);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal. The window is tiny,
			 * and the signal is private anyway.
			 */
			if (unlikely(error == -ESRCH))
				error = 0;
		}
	}
	rcu_read_unlock();

	return error;
}

static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = current_uid();

	return do_send_specific(tgid, pid, sig, &info);
}

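/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
 *  exists but it's not belonging to the target process anymore. This
 *  method solves the problem of threads exiting and PIDs getting reused.
 */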
SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}

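/*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */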
SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}

SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}

long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if (info->si_code >= 0)
		return -EPERM;
	info->si_signo = sig;

	return do_send_specific(tgid, pid, sig, info);
}

SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}

int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *t = current;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &t->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (oact)
		*oact = *k;

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(t, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			rm_from_queue_full(&mask, &t->signal->shared_pending);
			do {
				rm_from_queue_full(&mask, &t->pending);
				t = next_thread(t);
			} while (t != current);
		}
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	oss.ss_sp = (void __user *) current->sas_ss_sp;
	oss.ss_size = current->sas_ss_size;
	oss.ss_flags = sas_ss_flags(sp);

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
			goto out;
		error = __get_user(ss_sp, &uss->ss_sp) |
			__get_user(ss_flags, &uss->ss_flags) |
			__get_user(ss_size, &uss->ss_size);
		if (error)
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly:
		 * old code may have been written using ss_flags==0
		 * to mean ss_flags==SS_ONSTACK (as this was the only
		 * way that worked) - this fix preserves that older
		 * mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	error = 0;
	if (uoss) {
		error = -EFAULT;
		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
			goto out;
		error = __put_user(oss.ss_sp, &uoss->ss_sp) |
			__put_user(oss.ss_size, &uoss->ss_size) |
			__put_user(oss.ss_flags, &uoss->ss_flags);
	}

out:
	return error;
}

#ifdef __ARCH_WANT_SYS_SIGPENDING

SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/* Some platforms have their own version with special arguments;
   others support only sys_rt_sigprocmask. */

SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set,
		old_sigset_t __user *, oset)
{
	int error;
	old_sigset_t old_set, new_set;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));

		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked.sig[0];

		error = 0;
		switch (how) {
		default:
			error = -EINVAL;
			break;
		case SIG_BLOCK:
			sigaddsetmask(&current->blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&current->blocked, new_set);
			break;
		case SIG_SETMASK:
			current->blocked.sig[0] = new_set;
			break;
		}

		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		old_set = current->blocked.sig[0];
	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */

#ifdef __ARCH_WANT_SYS_RT_SIGACTION
SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct sigaction __user *, act,
		struct sigaction __user *, oact,
		size_t, sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */

#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SYS_SGETMASK */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */

#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif

#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */

__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

void __init signals_init(void)
{
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}

#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>

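/*
 * kdb_send_sig_info - Allows kdb to send signals without exposing
 * signal internals.  This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */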
void
kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
{
	static struct task_struct *kdb_prev_t;
	int sig, new_t;
	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "kernel, try again later\n");
		return;
	}
	spin_unlock(&t->sighand->siglock);
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (t->state != TASK_RUNNING && new_t) {
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	sig = info->si_signo;
	if (send_sig_info(sig, info, t))
		kdb_printf("Fail to deliver Signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
}
#endif	/* CONFIG_KGDB_KDB */