1
2
3
4
5
6
7
8
9
10
11
12
13#include <linux/slab.h>
14#include <linux/export.h>
15#include <linux/init.h>
16#include <linux/sched.h>
17#include <linux/fs.h>
18#include <linux/tty.h>
19#include <linux/binfmts.h>
20#include <linux/coredump.h>
21#include <linux/security.h>
22#include <linux/syscalls.h>
23#include <linux/ptrace.h>
24#include <linux/signal.h>
25#include <linux/signalfd.h>
26#include <linux/ratelimit.h>
27#include <linux/tracehook.h>
28#include <linux/capability.h>
29#include <linux/freezer.h>
30#include <linux/pid_namespace.h>
31#include <linux/nsproxy.h>
32#include <linux/user_namespace.h>
33#include <linux/uprobes.h>
34#include <linux/compat.h>
35#include <linux/cn_proc.h>
36#define CREATE_TRACE_POINTS
37#include <trace/events/signal.h>
38
39#include <asm/param.h>
40#include <asm/uaccess.h>
41#include <asm/unistd.h>
42#include <asm/siginfo.h>
43#include <asm/cacheflush.h>
44#include "audit.h"
45
46
47
48
49
/* Slab cache backing struct sigqueue allocations (see __sigqueue_alloc()). */
static struct kmem_cache *sigqueue_cachep;

/* Non-zero: log fatal/dropped signals to the kernel log; set via the
 * "print-fatal-signals=" boot parameter (see setup_print_fatal_signals()). */
int print_fatal_signals __read_mostly;
53
54static void __user *sig_handler(struct task_struct *t, int sig)
55{
56 return t->sighand->action[sig - 1].sa.sa_handler;
57}
58
59static int sig_handler_ignored(void __user *handler, int sig)
60{
61
62 return handler == SIG_IGN ||
63 (handler == SIG_DFL && sig_kernel_ignore(sig));
64}
65
66static int sig_task_ignored(struct task_struct *t, int sig, bool force)
67{
68 void __user *handler;
69
70 handler = sig_handler(t, sig);
71
72 if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
73 handler == SIG_DFL && !force)
74 return 1;
75
76 return sig_handler_ignored(handler, sig);
77}
78
79static int sig_ignored(struct task_struct *t, int sig, bool force)
80{
81
82
83
84
85
86 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
87 return 0;
88
89 if (!sig_task_ignored(t, sig, force))
90 return 0;
91
92
93
94
95 return !t->ptrace;
96}
97
98
99
100
101
/*
 * Return non-zero iff any bit set in @signal is not also set in
 * @blocked.  The switch hand-unrolls the common small values of
 * _NSIG_WORDS; the default arm handles any other word count.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}
127
/* Any signal in pending set @p that is not blocked by mask @b? */
#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

/*
 * Re-evaluate whether @t has deliverable work: pending job-control
 * actions, or unblocked signals in its private or shared queues.
 * Sets TIF_SIGPENDING and returns 1 if so; returns 0 without touching
 * the flag otherwise.
 */
static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}

	/*
	 * The flag is deliberately NOT cleared here: only the task
	 * itself may safely clear its own TIF_SIGPENDING (see
	 * recalc_sigpending()).
	 */
	return 0;
}
145
146
147
148
149
/*
 * Recompute TIF_SIGPENDING for @t and, if something is pending, wake
 * the task so it notices.  NOTE(review): callers appear to hold
 * t->sighand->siglock (see force_sig_info()) -- confirm at other sites.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}
155
/*
 * Recompute TIF_SIGPENDING for the current task.  It is safe to clear
 * the flag here because a task cannot race with itself; the flag is
 * kept while freezing(), presumably because the freezer relies on it
 * to interrupt the task -- TODO confirm.
 */
void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);

}
162
163
164
/* Fault-style signals that are delivered in preference to anything else
 * pending in the first word of the set. */
#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

/*
 * Return the number of the lowest pending signal in @pending that is
 * not blocked by @mask, or 0 if there is none.  Synchronous signals
 * (all of which live in the first word) are preferred.
 */
int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the synchronous
	 * signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Single-word sets were fully handled above. */
		break;
	}

	return sig;
}
214
215static inline void print_dropped_signal(int sig)
216{
217 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
218
219 if (!print_fatal_signals)
220 return;
221
222 if (!__ratelimit(&ratelimit_state))
223 return;
224
225 printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
226 current->comm, current->pid, sig);
227}
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * If @mask carries a new stop signal, the old JOBCTL_STOP_SIGMASK field
 * is cleared first, then @mask is OR'ed in.  Fails (returns false) when
 * the task is already dying (fatal signal pending or PF_EXITING).
 * NOTE(review): the unlocked read-modify-write of ->jobctl implies the
 * caller holds @task->sighand->siglock -- confirm at call sites.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
261
262
263
264
265
266
267
268
269
270
271
272
273
274void task_clear_jobctl_trapping(struct task_struct *task)
275{
276 if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
277 task->jobctl &= ~JOBCTL_TRAPPING;
278 wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
279 }
280}
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear (subset of JOBCTL_PENDING_MASK)
 *
 * Clearing JOBCTL_STOP_PENDING also drops its CONSUME/DEQUEUED
 * modifiers.  Once no pending bits remain, JOBCTL_TRAPPING is released
 * as well so a waiting ptracer can proceed.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
/**
 * task_participate_group_stop - participate in a group stop
 * @task: task taking part in the group stop
 *
 * Clears the task's JOBCTL_STOP_PENDING state and, if the task still
 * owed a "consume" on the stop, decrements group_stop_count.  Returns
 * true iff this completed the group stop, i.e. SIGNAL_STOP_STOPPED was
 * just set by this call.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		sig->flags = SIGNAL_STOP_STOPPED;
		return true;
	}
	return false;
}
351
352
353
354
355
356
/*
 * Allocate a new signal queue record.  The target user's sigpending
 * counter and uid refcount are charged up front; the charge is undone
 * if the RLIMIT_SIGPENDING check or the slab allocation fails.
 * @override_rlimit skips the rlimit check (used for forced signals).
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * RCU protects the credential lookup only; get_uid() keeps the
	 * user_struct alive after the critical section ends.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		/* Roll back the optimistic accounting done above. */
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}
391
/*
 * Free one sigqueue entry and undo its per-user accounting.  Entries
 * preallocated for POSIX timers (SIGQUEUE_PREALLOC) are owned by the
 * timer and must be released via sigqueue_free() instead.
 */
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
400
/* Discard every queued entry in @queue and clear its pending mask. */
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue , list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
412
413
414
415
/*
 * Flush all pending signals (private and shared) for task @t.
 * Caller must hold t->sighand->siglock -- see flush_signals() for the
 * locked wrapper.
 */
void __flush_signals(struct task_struct *t)
{
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
}
422
/* Locked wrapper around __flush_signals(). */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	__flush_signals(t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
431
/*
 * Remove only SI_TIMER (interval-timer) entries from @pending.  A
 * signal number stays set in the pending mask if at least one
 * non-timer entry with that number remains queued.
 */
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			/* Non-timer entry: its signal must stay pending. */
			sigaddset(&retain, sig);
		} else {
			/* Timer entry: drop it and its pending bit. */
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}
454
/*
 * Flush interval-timer signals from both the private and shared
 * pending queues of the current task, under siglock.
 */
void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
465
466void ignore_signals(struct task_struct *t)
467{
468 int i;
469
470 for (i = 0; i < _NSIG; ++i)
471 t->sighand->action[i].sa.sa_handler = SIG_IGN;
472
473 flush_signals(t);
474}
475
476
477
478
479
480void
481flush_signal_handlers(struct task_struct *t, int force_default)
482{
483 int i;
484 struct k_sigaction *ka = &t->sighand->action[0];
485 for (i = _NSIG ; i != 0 ; i--) {
486 if (force_default || ka->sa.sa_handler != SIG_IGN)
487 ka->sa.sa_handler = SIG_DFL;
488 ka->sa.sa_flags = 0;
489#ifdef __ARCH_HAS_SA_RESTORER
490 ka->sa.sa_restorer = NULL;
491#endif
492 sigemptyset(&ka->sa.sa_mask);
493 ka++;
494 }
495}
496
/*
 * Is @sig effectively unhandled for @tsk?  True for global init, and
 * for tasks with SIG_IGN/SIG_DFL dispositions that are not ptraced
 * (a tracer may intercept the signal).
 */
int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;

	return !tsk->ptrace;
}
507
508
509
510
511
512
513
514
515
516
517void
518block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
519{
520 unsigned long flags;
521
522 spin_lock_irqsave(¤t->sighand->siglock, flags);
523 current->notifier_mask = mask;
524 current->notifier_data = priv;
525 current->notifier = notifier;
526 spin_unlock_irqrestore(¤t->sighand->siglock, flags);
527}
528
529
530
531void
532unblock_all_signals(void)
533{
534 unsigned long flags;
535
536 spin_lock_irqsave(¤t->sighand->siglock, flags);
537 current->notifier = NULL;
538 current->notifier_data = NULL;
539 recalc_sigpending();
540 spin_unlock_irqrestore(¤t->sighand->siglock, flags);
541}
542
/*
 * Dequeue the first queued entry for @sig from @list into @info.  The
 * pending-mask bit is cleared only when no second entry for the same
 * signal remains queued (the "still_pending" path skips the clear).
 */
static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check whether
	 * another entry for the same signal is queued behind it.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
	} else {
		/*
		 * The bit was set with no queued entry: a fast-pathed
		 * signal, or the queue was out of space when it was sent.
		 * Synthesize a minimal SI_USER siginfo.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}
579
/*
 * Dequeue the next unblocked signal from @pending into @info; returns
 * the signal number or 0.  If a block_all_signals() notifier is
 * installed and vetoes a signal in its mask, the signal stays pending
 * and 0 is returned.
 */
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		collect_signal(sig, pending, info);
	}

	return sig;
}
600
601
602
603
604
605
606
/*
 * Dequeue a signal for @tsk, trying the private queue first and then
 * the shared one.  Returns the signal number (siginfo in *info) or 0.
 * Caller must hold tsk->sighand->siglock; it is dropped and retaken
 * around do_schedule_next_timer() below.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimer signal?  A periodic ITIMER_REAL delivers
		 * SIGALRM to the shared queue; re-arm the hrtimer here
		 * so the next period fires even if the previous signal
		 * sat in the queue for a while.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Record that a stop signal was actually dequeued;
		 * the group-stop machinery keys off this bit.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * POSIX timer signal: release the siglock while the
		 * timer subsystem schedules the next expiry.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
675
676
677
678
679
680
681
682
683
684
685
686
/*
 * Tell a task it has a new signal to handle: set TIF_SIGPENDING and
 * wake it if it is sleeping in any of @state (TASK_INTERRUPTIBLE is
 * always included).  If the task was not asleep, kick the CPU it runs
 * on so it re-checks the flag on its way back to user mode.
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);

	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}
700
701
702
703
704
705
706
707
708
709
/*
 * Remove from @s every signal (and every queued entry, including
 * real-time ones) whose number is in @mask.  Returns 1 if anything was
 * removed, 0 if nothing in @mask was pending.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
728
729
730
731
732
733
/*
 * Remove signals in @mask (an unsigned long bitmask, so legacy signals
 * only) from @s.  Queued entries are freed only for signals below
 * SIGRTMIN.  Returns 1 if anything matched, 0 otherwise.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
751
/*
 * True for the special siginfo sentinels (SEND_SIG_NOINFO/PRIV/FORCED),
 * which are small constant "pointer" values, not real siginfo objects.
 */
static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}
756
/* Does this siginfo represent a signal sent by user space? */
static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}
762
763
764
765
766static int kill_ok_by_cred(struct task_struct *t)
767{
768 const struct cred *cred = current_cred();
769 const struct cred *tcred = __task_cred(t);
770
771 if (uid_eq(cred->euid, tcred->suid) ||
772 uid_eq(cred->euid, tcred->uid) ||
773 uid_eq(cred->uid, tcred->suid) ||
774 uid_eq(cred->uid, tcred->uid))
775 return 1;
776
777 if (ns_capable(tcred->user_ns, CAP_KILL))
778 return 1;
779
780 return 0;
781}
782
783
784
785
786
/*
 * May the current task send @sig (described by @info) to @t?
 * Returns 0 if allowed, -EINVAL/-EPERM or an audit/security error
 * otherwise.  Kernel-generated signals always pass.
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t);
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * SIGCONT may be sent within the same session
			 * regardless of uids; anything else falls
			 * through to -EPERM (fallthrough intended).
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
/*
 * Schedule a JOBCTL_TRAP_NOTIFY trap for a PTRACE_SEIZE'd tracee and
 * wake it; if the tracee is in ptrace listen (JOBCTL_LISTENING) it is
 * woken out of that state too.  Caller holds t->sighand->siglock.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}
847
848
849
850
851
852
853
854
855
856
857
/*
 * Handle the process-wide side effects of generating @sig for @p:
 * stop signals flush pending SIGCONTs, SIGCONT flushes pending stop
 * signals, wakes stopped threads and records a CLD_* notification for
 * the parent.  Returns true if the signal should actually be queued
 * (i.e. it is not ignored), false if it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (signal->flags & SIGNAL_GROUP_COREDUMP)
			/* While dumping core, only SIGKILL gets through. */
			return sig == SIGKILL;
		/* Group exit in progress: queue normally. */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * A stop signal cancels any pending SIGCONT, in the
		 * shared queue and in every thread's private queue.
		 */
		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
		} while_each_thread(p, t);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * SIGCONT cancels all pending stop signals and resumes
		 * every thread; seized tracees get a trap notification
		 * instead of a plain wakeup.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
		t = p;
		do {
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		} while_each_thread(p, t);

		/*
		 * Record why the parent should be notified: the group
		 * was fully stopped, or a stop was still in progress.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			signal->flags = why | SIGNAL_STOP_CONTINUED;
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}
922
923
924
925
926
927
928
929
930
931static inline int wants_signal(int sig, struct task_struct *p)
932{
933 if (sigismember(&p->blocked, sig))
934 return 0;
935 if (p->flags & PF_EXITING)
936 return 0;
937 if (sig == SIGKILL)
938 return 1;
939 if (task_is_stopped_or_traced(p))
940 return 0;
941 return task_curr(p) || !signal_pending(p);
942}
943
/*
 * Pick a thread to take the just-queued @sig and wake it.  For fatal
 * signals to a killable group, short-circuit the exit: mark the whole
 * group exiting and queue SIGKILL to every thread.
 */
static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * Prefer @p itself if it can take the signal right now.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * Private signal to a single thread that cannot take it
		 * now: it will see it when it next unblocks/runs.
		 */
		return;
	else {
		/*
		 * Otherwise round-robin from the group's cached target,
		 * looking for any thread that wants the signal.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread can take it: leave it queued
				 * until some thread unblocks it.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !t->ptrace)) {
		/*
		 * Skip the group-exit shortcut for core-dumping signals;
		 * those must go through the full delivery path.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}
1019
/*
 * Legacy (non-realtime) signals are not queued twice: true if @sig is
 * below SIGRTMIN and already pending in @signals.
 */
static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
1024
#ifdef CONFIG_USER_NS
/*
 * Translate a user-originated info->si_uid from the sender's user
 * namespace into the receiver's, so the target sees a meaningful uid.
 * No-op when both tasks share a namespace or for kernel signals.
 */
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
/* Without user namespaces there is nothing to translate. */
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	return;
}
#endif
1045
/*
 * Core signal generation: queue @sig (described by @info) on @t's
 * private or shared (@group) pending set, then pick a thread to
 * deliver it via complete_signal().  Returns 0, or -EAGAIN when a
 * real-time signal's queue entry could not be allocated.  Caller
 * holds t->sighand->siglock.
 */
static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_FORCED)))
		goto ret;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * SEND_SIG_FORCED carries no siginfo; skip allocation entirely.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
ret:
	trace_signal_generate(sig, info, t, group, result);
	return ret;
}
1149
/*
 * Wrapper for __send_signal() that detects a sender from an ancestor
 * pid namespace (the sender has no pid in the target's namespace), so
 * si_pid can be zeroed rather than exposing a meaningless value.
 */
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}
1162
/*
 * Log a fatal signal with register state; on native i386 also dump the
 * 16 instruction bytes at the faulting IP (best effort via get_user()).
 */
static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	printk(KERN_INFO "potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk(KERN_INFO "code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			printk(KERN_CONT "%02x ", insn);
		}
	}
	printk(KERN_CONT "\n");
#endif
	/* show_regs() must not be preempted mid-dump. */
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}
1186
/* Parse the "print-fatal-signals=" boot parameter into the flag above. */
static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);
1195
/* Send @sig to the whole thread group of @p (shared pending queue). */
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}
1201
/* Send @sig to one specific thread @t (private pending queue). */
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}
1207
/*
 * Send @sig to @p, thread-group wide if @group, taking and releasing
 * the sighand lock.  Returns -ESRCH when the task has no sighand
 * anymore (it is exiting).
 */
int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
/*
 * Force delivery of @sig to @t: if the signal is ignored or blocked,
 * reset the handler to SIG_DFL and unblock it first, and strip
 * SIGNAL_UNKILLABLE when the default action applies so even init-like
 * tasks take it.  Used for signals the task cannot be allowed to miss
 * (e.g. synchronous faults).
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
1258
1259
1260
1261
/*
 * Queue SIGKILL to every thread in @p's group except @p itself and
 * cancel any pending group stop.  Returns the number of other threads
 * (including already-exited ones, which are only counted, not signalled).
 * NOTE(review): callers presumably hold the sighand lock -- confirm.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}
1282
/*
 * Lock tsk->sighand->siglock against the sighand being switched or
 * freed under us: read ->sighand under RCU, take the lock, then
 * re-check the pointer.  Retry on a race; return NULL (irqs restored)
 * if the task has no sighand (it is exiting).  On success, returns the
 * sighand with the lock held and irqs saved in *flags.
 */
struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		local_irq_save(*flags);
		rcu_read_lock();
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL)) {
			rcu_read_unlock();
			local_irq_restore(*flags);
			break;
		}

		spin_lock(&sighand->siglock);
		if (likely(sighand == tsk->sighand)) {
			rcu_read_unlock();
			break;
		}
		/* Lost the race: the task switched sighand; try again. */
		spin_unlock(&sighand->siglock);
		rcu_read_unlock();
		local_irq_restore(*flags);
	}

	return sighand;
}
1310
1311
1312
1313
/*
 * Permission-checked, group-wide signal send.  A @sig of 0 performs
 * only the permission check (the classic kill(pid, 0) probe).
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;
}
1327
1328
1329
1330
1331
1332
/*
 * Send @sig to every process in process group @pgrp.  Succeeds (0) if
 * at least one send succeeded; otherwise returns the last error.
 * Callers hold tasklist_lock (see kill_pgrp()/kill_something_info()).
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
1347
/*
 * Send @sig to the process identified by @pid.  Retries on -ESRCH,
 * presumably because the group leader can change under us during exec
 * so the pid->task lookup must be redone -- confirm against upstream
 * history.  Runs entirely under rcu_read_lock().
 */
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	rcu_read_lock();
retry:
	p = pid_task(pid, PIDTYPE_PID);
	if (p) {
		error = group_send_sig_info(sig, info, p);
		if (unlikely(error == -ESRCH))
			/* The task vanished between lookup and send. */
			goto retry;
	}
	rcu_read_unlock();

	return error;
}
1371
/* kill_pid_info() by numeric pid in the caller's pid namespace. */
int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}
1380
1381static int kill_as_cred_perm(const struct cred *cred,
1382 struct task_struct *target)
1383{
1384 const struct cred *pcred = __task_cred(target);
1385 if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
1386 !uid_eq(cred->uid, pcred->suid) && !uid_eq(cred->uid, pcred->uid))
1387 return 0;
1388 return 1;
1389}
1390
1391
/*
 * Like kill_pid_info(), but checks permissions against the supplied
 * credentials and LSM secid instead of the current task's.  Used by
 * in-kernel senders acting on behalf of some other identity.
 */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
			  const struct cred *cred, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;

	/* sig == 0 is a pure permission probe; nothing more to do. */
	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, 1, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
1428
1429
1430
1431
1432
1433
1434
1435
/*
 * kill(2) fan-out on @pid: >0 one process; 0 the caller's process
 * group; -1 every process except pid 1 and the caller's own group;
 * <-1 the process group -pid.
 */
static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			/* Skip init (vpid 1) and our own thread group. */
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}
1470
1471
1472
1473
1474
1475int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1476{
1477
1478
1479
1480
1481 if (!valid_signal(sig))
1482 return -EINVAL;
1483
1484 return do_send_sig_info(sig, info, p, false);
1485}
1486
/* Map the "priv" flag onto the kernel/user siginfo sentinels. */
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

/* Send @sig to thread @p; @priv selects kernel- vs user-style siginfo. */
int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
1495
/* Force @sig on @p with kernel-originated siginfo (see force_sig_info()). */
void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}
1501
1502
1503
1504
1505
1506
1507
/*
 * Force SIGSEGV on @p.  When the failing signal IS SIGSEGV, reset its
 * disposition to SIG_DFL first -- presumably so a task whose SIGSEGV
 * handler itself faults cannot loop; confirm against callers.
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}
1520
/* Send @sig to process group @pid under tasklist_lock. */
int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);
1532
/* Send @sig to the process identified by struct pid @pid. */
int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
/*
 * Preallocate a sigqueue entry for the current task (used by POSIX
 * timers).  The SIGQUEUE_PREALLOC flag marks it as owned by the caller
 * so generic queue teardown will not free it; release it with
 * sigqueue_free().  May return NULL on allocation failure.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}
1557
1558void sigqueue_free(struct sigqueue *q)
1559{
1560 unsigned long flags;
1561 spinlock_t *lock = ¤t->sighand->siglock;
1562
1563 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1564
1565
1566
1567
1568
1569 spin_lock_irqsave(lock, flags);
1570 q->flags &= ~SIGQUEUE_PREALLOC;
1571
1572
1573
1574
1575 if (!list_empty(&q->list))
1576 q = NULL;
1577 spin_unlock_irqrestore(lock, flags);
1578
1579 if (q)
1580 __sigqueue_free(q);
1581}
1582
/*
 * Queue a preallocated sigqueue entry @q (POSIX timer) on @t's private
 * or shared (@group) pending set.  Returns 0 on success, 1 if the
 * signal was ignored or was already queued (si_overrun is bumped), or
 * -1 if the task is exiting (no sighand).
 */
int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1;
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * The entry is still on a queue from a previous firing:
		 * count an overrun instead of queueing it twice.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, group, result);
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
1626
1627
1628
1629
1630
1631
1632
1633
/*
 * Let the parent know that @tsk (a dead thread-group leader, or a
 * ptraced thread) has exited, normally via @sig (usually SIGCHLD).
 * Returns true ("autoreap") when the parent ignores SIGCHLD or has
 * SA_NOCLDWAIT set, in which case the caller should release the task
 * itself instead of leaving a zombie.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	cputime_t utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	if (sig != SIGCHLD) {
		/*
		 * The parent exec'd since the death signal was set up:
		 * fall back to plain SIGCHLD (the exec-id check detects
		 * that self_exec_id has moved on).
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * si_pid/si_uid are translated into the PARENT's pid and user
	 * namespaces, since that is who will read them.
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime);

	/* Decode exit_code into CLD_DUMPED / CLD_KILLED / CLD_EXITED. */
	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * Parent ignores SIGCHLD or set SA_NOCLDWAIT: the child
		 * should be reaped automatically rather than left as a
		 * zombie, and with plain SIG_IGN no signal is sent at
		 * all (POSIX sigaction semantics).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: %true if the notification is for the ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * If @for_ptracer is %false, @tsk's group leader notifies its real
 * parent; if %true, @tsk reports to @tsk->parent (the ptracer).
 * The parent's SIGCHLD disposition (SIG_IGN / SA_NOCLDSTOP) can
 * suppress the signal, but the parent is always woken for wait().
 *
 * NOTE(review): callers appear to hold tasklist_lock read-locked —
 * confirm before relying on parent stability here.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	cputime_t utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		/* group stop is reported by the leader to the real parent */
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/* Translate pid/uid into the parent's namespace view. */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = cputime_to_clock_t(utime);
	info.si_stime = cputime_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
1793
1794static inline int may_ptrace_stop(void)
1795{
1796 if (!likely(current->ptrace))
1797 return 0;
1798
1799
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809
1810
1811 if (unlikely(current->mm->core_state) &&
1812 unlikely(current->mm == current->parent->mm))
1813 return 0;
1814
1815 return 1;
1816}
1817
1818
1819
1820
1821
1822static int sigkill_pending(struct task_struct *tsk)
1823{
1824 return sigismember(&tsk->pending.signal, SIGKILL) ||
1825 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1826}
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
1838
1839static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
1840 __releases(¤t->sighand->siglock)
1841 __acquires(¤t->sighand->siglock)
1842{
1843 bool gstop_done = false;
1844
1845 if (arch_ptrace_stop_needed(exit_code, info)) {
1846
1847
1848
1849
1850
1851
1852
1853
1854
1855
1856
1857 spin_unlock_irq(¤t->sighand->siglock);
1858 arch_ptrace_stop(exit_code, info);
1859 spin_lock_irq(¤t->sighand->siglock);
1860 if (sigkill_pending(current))
1861 return;
1862 }
1863
1864
1865
1866
1867
1868
1869
1870
1871 set_current_state(TASK_TRACED);
1872
1873 current->last_siginfo = info;
1874 current->exit_code = exit_code;
1875
1876
1877
1878
1879
1880
1881
1882
1883 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
1884 gstop_done = task_participate_group_stop(current);
1885
1886
1887 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
1888 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
1889 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
1890
1891
1892 task_clear_jobctl_trapping(current);
1893
1894 spin_unlock_irq(¤t->sighand->siglock);
1895 read_lock(&tasklist_lock);
1896 if (may_ptrace_stop()) {
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907 do_notify_parent_cldstop(current, true, why);
1908 if (gstop_done && ptrace_reparented(current))
1909 do_notify_parent_cldstop(current, false, why);
1910
1911
1912
1913
1914
1915
1916
1917 preempt_disable();
1918 read_unlock(&tasklist_lock);
1919 preempt_enable_no_resched();
1920 freezable_schedule();
1921 } else {
1922
1923
1924
1925
1926
1927
1928
1929
1930
1931
1932 if (gstop_done)
1933 do_notify_parent_cldstop(current, false, why);
1934
1935
1936 __set_current_state(TASK_RUNNING);
1937 if (clear_code)
1938 current->exit_code = 0;
1939 read_unlock(&tasklist_lock);
1940 }
1941
1942
1943
1944
1945
1946
1947 spin_lock_irq(¤t->sighand->siglock);
1948 current->last_siginfo = NULL;
1949
1950
1951 current->jobctl &= ~JOBCTL_LISTENING;
1952
1953
1954
1955
1956
1957
1958 recalc_sigpending_tsk(current);
1959}
1960
1961static void ptrace_do_notify(int signr, int exit_code, int why)
1962{
1963 siginfo_t info;
1964
1965 memset(&info, 0, sizeof info);
1966 info.si_signo = signr;
1967 info.si_code = exit_code;
1968 info.si_pid = task_pid_vnr(current);
1969 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
1970
1971
1972 ptrace_stop(exit_code, why, 1, &info);
1973}
1974
1975void ptrace_notify(int exit_code)
1976{
1977 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1978 if (unlikely(current->task_works))
1979 task_work_run();
1980
1981 spin_lock_irq(¤t->sighand->siglock);
1982 ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
1983 spin_unlock_irq(¤t->sighand->siglock);
1984}
1985
1986
1987
1988
1989
1990
1991
1992
1993
1994
1995
1996
1997
1998
1999
2000
2001
2002
2003
2004
2005
2006
2007
2008static bool do_signal_stop(int signr)
2009 __releases(¤t->sighand->siglock)
2010{
2011 struct signal_struct *sig = current->signal;
2012
2013 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2014 unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2015 struct task_struct *t;
2016
2017
2018 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2019
2020 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2021 unlikely(signal_group_exit(sig)))
2022 return false;
2023
2024
2025
2026
2027
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2043 sig->group_exit_code = signr;
2044
2045 sig->group_stop_count = 0;
2046
2047 if (task_set_jobctl_pending(current, signr | gstop))
2048 sig->group_stop_count++;
2049
2050 for (t = next_thread(current); t != current;
2051 t = next_thread(t)) {
2052
2053
2054
2055
2056
2057 if (!task_is_stopped(t) &&
2058 task_set_jobctl_pending(t, signr | gstop)) {
2059 sig->group_stop_count++;
2060 if (likely(!(t->ptrace & PT_SEIZED)))
2061 signal_wake_up(t, 0);
2062 else
2063 ptrace_trap_notify(t);
2064 }
2065 }
2066 }
2067
2068 if (likely(!current->ptrace)) {
2069 int notify = 0;
2070
2071
2072
2073
2074
2075
2076 if (task_participate_group_stop(current))
2077 notify = CLD_STOPPED;
2078
2079 __set_current_state(TASK_STOPPED);
2080 spin_unlock_irq(¤t->sighand->siglock);
2081
2082
2083
2084
2085
2086
2087
2088
2089
2090
2091 if (notify) {
2092 read_lock(&tasklist_lock);
2093 do_notify_parent_cldstop(current, false, notify);
2094 read_unlock(&tasklist_lock);
2095 }
2096
2097
2098 freezable_schedule();
2099 return true;
2100 } else {
2101
2102
2103
2104
2105 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2106 return false;
2107 }
2108}
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, it's used for both group stop and explicit
 * SEIZE/INTERRUPT traps; both generate a PTRACE_EVENT_STOP trap with
 * accompanying siginfo.  When !PT_SEIZED, it's used only for the group
 * stop trap with the stop signal number as exit_code and no siginfo.
 *
 * Must be called with @current->sighand->siglock held; may be released
 * and re-acquired across the stop.
 */
static void do_jobctl_trap(void)
{
	struct signal_struct *signal = current->signal;
	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

	if (current->ptrace & PT_SEIZED) {
		/* Not stopping for a group stop?  Report SIGTRAP. */
		if (!signal->group_stop_count &&
		    !(signal->flags & SIGNAL_STOP_STOPPED))
			signr = SIGTRAP;
		WARN_ON_ONCE(!signr);
		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
				 CLD_STOPPED);
	} else {
		WARN_ON_ONCE(!signr);
		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
		current->exit_code = 0;
	}
}
2143
2144static int ptrace_signal(int signr, siginfo_t *info)
2145{
2146 ptrace_signal_deliver();
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2157 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2158
2159
2160 signr = current->exit_code;
2161 if (signr == 0)
2162 return signr;
2163
2164 current->exit_code = 0;
2165
2166
2167
2168
2169
2170
2171
2172 if (signr != info->si_signo) {
2173 info->si_signo = signr;
2174 info->si_errno = 0;
2175 info->si_code = SI_USER;
2176 rcu_read_lock();
2177 info->si_pid = task_pid_vnr(current->parent);
2178 info->si_uid = from_kuid_munged(current_user_ns(),
2179 task_uid(current->parent));
2180 rcu_read_unlock();
2181 }
2182
2183
2184 if (sigismember(¤t->blocked, signr)) {
2185 specific_send_sig_info(signr, info, current);
2186 signr = 0;
2187 }
2188
2189 return signr;
2190}
2191
2192int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
2193 struct pt_regs *regs, void *cookie)
2194{
2195 struct sighand_struct *sighand = current->sighand;
2196 struct signal_struct *signal = current->signal;
2197 int signr;
2198
2199 if (unlikely(current->task_works))
2200 task_work_run();
2201
2202 if (unlikely(uprobe_deny_signal()))
2203 return 0;
2204
2205
2206
2207
2208
2209
2210 try_to_freeze();
2211
2212relock:
2213 spin_lock_irq(&sighand->siglock);
2214
2215
2216
2217
2218
2219 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2220 int why;
2221
2222 if (signal->flags & SIGNAL_CLD_CONTINUED)
2223 why = CLD_CONTINUED;
2224 else
2225 why = CLD_STOPPED;
2226
2227 signal->flags &= ~SIGNAL_CLD_MASK;
2228
2229 spin_unlock_irq(&sighand->siglock);
2230
2231
2232
2233
2234
2235
2236
2237
2238
2239 read_lock(&tasklist_lock);
2240 do_notify_parent_cldstop(current, false, why);
2241
2242 if (ptrace_reparented(current->group_leader))
2243 do_notify_parent_cldstop(current->group_leader,
2244 true, why);
2245 read_unlock(&tasklist_lock);
2246
2247 goto relock;
2248 }
2249
2250 for (;;) {
2251 struct k_sigaction *ka;
2252
2253 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2254 do_signal_stop(0))
2255 goto relock;
2256
2257 if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
2258 do_jobctl_trap();
2259 spin_unlock_irq(&sighand->siglock);
2260 goto relock;
2261 }
2262
2263 signr = dequeue_signal(current, ¤t->blocked, info);
2264
2265 if (!signr)
2266 break;
2267
2268 if (unlikely(current->ptrace) && signr != SIGKILL) {
2269 signr = ptrace_signal(signr, info);
2270 if (!signr)
2271 continue;
2272 }
2273
2274 ka = &sighand->action[signr-1];
2275
2276
2277 trace_signal_deliver(signr, info, ka);
2278
2279 if (ka->sa.sa_handler == SIG_IGN)
2280 continue;
2281 if (ka->sa.sa_handler != SIG_DFL) {
2282
2283 *return_ka = *ka;
2284
2285 if (ka->sa.sa_flags & SA_ONESHOT)
2286 ka->sa.sa_handler = SIG_DFL;
2287
2288 break;
2289 }
2290
2291
2292
2293
2294 if (sig_kernel_ignore(signr))
2295 continue;
2296
2297
2298
2299
2300
2301
2302
2303
2304
2305
2306
2307 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2308 !sig_kernel_only(signr))
2309 continue;
2310
2311 if (sig_kernel_stop(signr)) {
2312
2313
2314
2315
2316
2317
2318
2319
2320
2321
2322 if (signr != SIGSTOP) {
2323 spin_unlock_irq(&sighand->siglock);
2324
2325
2326
2327 if (is_current_pgrp_orphaned())
2328 goto relock;
2329
2330 spin_lock_irq(&sighand->siglock);
2331 }
2332
2333 if (likely(do_signal_stop(info->si_signo))) {
2334
2335 goto relock;
2336 }
2337
2338
2339
2340
2341
2342 continue;
2343 }
2344
2345 spin_unlock_irq(&sighand->siglock);
2346
2347
2348
2349
2350 current->flags |= PF_SIGNALED;
2351
2352 if (sig_kernel_coredump(signr)) {
2353 if (print_fatal_signals)
2354 print_fatal_signal(info->si_signo);
2355 proc_coredump_connector(current);
2356
2357
2358
2359
2360
2361
2362
2363
2364 do_coredump(info);
2365 }
2366
2367
2368
2369
2370 do_group_exit(info->si_signo);
2371
2372 }
2373 spin_unlock_irq(&sighand->siglock);
2374 return signr;
2375}
2376
2377
2378
2379
2380
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390void signal_delivered(int sig, siginfo_t *info, struct k_sigaction *ka,
2391 struct pt_regs *regs, int stepping)
2392{
2393 sigset_t blocked;
2394
2395
2396
2397
2398
2399 clear_restore_sigmask();
2400
2401 sigorsets(&blocked, ¤t->blocked, &ka->sa.sa_mask);
2402 if (!(ka->sa.sa_flags & SA_NODEFER))
2403 sigaddset(&blocked, sig);
2404 set_current_blocked(&blocked);
2405 tracehook_signal_handler(sig, info, ka, regs, stepping);
2406}
2407
2408void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2409{
2410 if (failed)
2411 force_sigsegv(ksig->sig, current);
2412 else
2413 signal_delivered(ksig->sig, &ksig->info, &ksig->ka,
2414 signal_pt_regs(), stepping);
2415}
2416
2417
2418
2419
2420
2421
/*
 * It could be that complete_signal() picked us to notify about a
 * group-wide signal.  Other threads should be notified now to take
 * the shared signals in @which since we will not.
 */
static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
{
	sigset_t retarget;
	struct task_struct *t;

	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
	if (sigisemptyset(&retarget))
		return;

	t = tsk;
	while_each_thread(tsk, t) {
		if (t->flags & PF_EXITING)
			continue;

		if (!has_pending_signals(&retarget, &t->blocked))
			continue;
		/* Remove the signals this thread can handle. */
		sigandsets(&retarget, &retarget, &t->blocked);

		if (!signal_pending(t))
			signal_wake_up(t, 0);
		/* Stop early once every shared signal has a taker. */
		if (sigisemptyset(&retarget))
			break;
	}
}
2448
/*
 * Mark @tsk PF_EXITING and hand off any shared pending signals it can
 * no longer handle to its sibling threads; finish its part of a group
 * stop in progress and notify the parent if that completed the stop.
 */
void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	sigset_t unblocked;

	/*
	 * @tsk is about to have PF_EXITING set - lock out users which
	 * expect stable threadgroup.
	 */
	threadgroup_change_begin(tsk);

	if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
		/* Nothing to retarget - fast path. */
		tsk->flags |= PF_EXITING;
		threadgroup_change_end(tsk);
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	tsk->flags |= PF_EXITING;

	threadgroup_change_end(tsk);

	if (!signal_pending(tsk))
		goto out;

	/* Retarget shared signals that we had unblocked. */
	unblocked = tsk->blocked;
	signotset(&unblocked);
	retarget_shared_pending(tsk, &unblocked);

	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
	    task_participate_group_stop(tsk))
		group_stop = CLD_STOPPED;
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	/*
	 * If group stop has completed, deliver the notification.  This
	 * should always go to the real parent of the group leader.
	 */
	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, false, group_stop);
		read_unlock(&tasklist_lock);
	}
}
2498
2499EXPORT_SYMBOL(recalc_sigpending);
2500EXPORT_SYMBOL_GPL(dequeue_signal);
2501EXPORT_SYMBOL(flush_signals);
2502EXPORT_SYMBOL(force_sig);
2503EXPORT_SYMBOL(send_sig);
2504EXPORT_SYMBOL(send_sig_info);
2505EXPORT_SYMBOL(sigprocmask);
2506EXPORT_SYMBOL(block_all_signals);
2507EXPORT_SYMBOL(unblock_all_signals);
2508
2509
2510
2511
2512
2513
2514
2515
2516
2517SYSCALL_DEFINE0(restart_syscall)
2518{
2519 struct restart_block *restart = ¤t_thread_info()->restart_block;
2520 return restart->fn(restart);
2521}
2522
/*
 * Restart function for syscalls that must not be restarted: always
 * fail with -EINTR.
 */
long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
2527
2528static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2529{
2530 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2531 sigset_t newblocked;
2532
2533 sigandnsets(&newblocked, newset, ¤t->blocked);
2534 retarget_shared_pending(tsk, &newblocked);
2535 }
2536 tsk->blocked = *newset;
2537 recalc_sigpending();
2538}
2539
2540
2541
2542
2543
2544
2545
2546
/**
 * set_current_blocked - change current->blocked mask
 * @newset: new mask
 *
 * It is wrong to change ->blocked directly; this helper ensures the
 * process can't miss a shared signal we are going to block.  SIGKILL
 * and SIGSTOP can never be blocked and are removed from @newset here.
 */
void set_current_blocked(sigset_t *newset)
{
	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
	__set_current_blocked(newset);
}
2552
2553void __set_current_blocked(const sigset_t *newset)
2554{
2555 struct task_struct *tsk = current;
2556
2557 spin_lock_irq(&tsk->sighand->siglock);
2558 __set_task_blocked(tsk, newset);
2559 spin_unlock_irq(&tsk->sighand->siglock);
2560}
2561
2562
2563
2564
2565
2566
2567
2568
2569
/**
 * sigprocmask - change the blocked-signal mask of the current task
 * @how: SIG_BLOCK, SIG_UNBLOCK or SIG_SETMASK
 * @set: signals to apply according to @how
 * @oldset: if non-NULL, receives the previous blocked mask
 *
 * Returns 0 on success, -EINVAL for an unknown @how.  Note that the
 * caller is expected to have filtered SIGKILL/SIGSTOP out of @set.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	struct task_struct *tsk = current;
	sigset_t newset;

	/* Lockless: only current can change its own ->blocked. */
	if (oldset)
		*oldset = tsk->blocked;

	switch (how) {
	case SIG_BLOCK:
		sigorsets(&newset, &tsk->blocked, set);
		break;
	case SIG_UNBLOCK:
		sigandnsets(&newset, &tsk->blocked, set);
		break;
	case SIG_SETMASK:
		newset = *set;
		break;
	default:
		return -EINVAL;
	}

	__set_current_blocked(&newset);
	return 0;
}
2596
2597
2598
2599
2600
2601
2602
2603
/**
 * sys_rt_sigprocmask - change the list of currently blocked signals
 * @how: whether to add, remove, or set signals
 * @nset: new blocked-signal set, or NULL to only query
 * @oset: previous value of the signal mask if non-NULL
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
		sigset_t __user *, oset, size_t, sigsetsize)
{
	sigset_t old_set, new_set;
	int error;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	/* Snapshot before any change so @oset reports the old mask. */
	old_set = current->blocked;

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
			return -EFAULT;
		/* SIGKILL and SIGSTOP can never be blocked. */
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
			return -EFAULT;
	}

	return 0;
}
2633
2634#ifdef CONFIG_COMPAT
/*
 * Compat version of rt_sigprocmask.  On little-endian the native
 * layout matches, so the native syscall is used directly; big-endian
 * needs explicit compat_sigset_t conversion.
 */
COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
	sigset_t old_set = current->blocked;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (nset) {
		compat_sigset_t new32;
		sigset_t new_set;
		int error;
		if (copy_from_user(&new32, nset, sizeof(compat_sigset_t)))
			return -EFAULT;

		sigset_from_compat(&new_set, &new32);
		/* SIGKILL and SIGSTOP can never be blocked. */
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}
	if (oset) {
		compat_sigset_t old32;
		sigset_to_compat(&old32, &old_set);
		if (copy_to_user(oset, &old32, sizeof(compat_sigset_t)))
			return -EFAULT;
	}
	return 0;
#else
	return sys_rt_sigprocmask(how, (sigset_t __user *)nset,
				  (sigset_t __user *)oset, sigsetsize);
#endif
}
2671#endif
2672
2673static int do_sigpending(void *set, unsigned long sigsetsize)
2674{
2675 if (sigsetsize > sizeof(sigset_t))
2676 return -EINVAL;
2677
2678 spin_lock_irq(¤t->sighand->siglock);
2679 sigorsets(set, ¤t->pending.signal,
2680 ¤t->signal->shared_pending.signal);
2681 spin_unlock_irq(¤t->sighand->siglock);
2682
2683
2684 sigandsets(set, ¤t->blocked, set);
2685 return 0;
2686}
2687
2688
2689
2690
2691
2692
2693
/**
 * sys_rt_sigpending - examine signals that are pending while blocked
 * @uset: where the pending set is stored
 * @sigsetsize: size of sigset_t type or smaller
 */
SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
{
	sigset_t set;
	int err = do_sigpending(&set, sigsetsize);
	if (!err && copy_to_user(uset, &set, sigsetsize))
		err = -EFAULT;
	return err;
}
2702
2703#ifdef CONFIG_COMPAT
/*
 * Compat version of rt_sigpending; big-endian converts to
 * compat_sigset_t, little-endian reuses the native syscall.
 */
COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
		compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
	sigset_t set;
	int err = do_sigpending(&set, sigsetsize);
	if (!err) {
		compat_sigset_t set32;
		sigset_to_compat(&set32, &set);
		/* we can get here only if sigsetsize <= sizeof(set) */
		if (copy_to_user(uset, &set32, sigsetsize))
			err = -EFAULT;
	}
	return err;
#else
	return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize);
#endif
}
2722#endif
2723
2724#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2725
/*
 * Copy a siginfo_t to user space field-by-field.
 *
 * If you change siginfo_t structure, please be sure this code is fixed
 * accordingly.  It should never copy any pad contained in the
 * structure to avoid security leaks, but must copy the generic fields
 * whatever the user-level code accesses.  Kernel-generated siginfo
 * (si_code >= 0) is copied per-field; user-originated info
 * (si_code < 0) is copied wholesale since the user built it.
 */
int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;

	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	/* Dispatch on the siginfo class to copy only the live union arm. */
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
#ifdef BUS_MCEERR_AO
		/*
		 * Other callers might not initialize the si_lsb field,
		 * so check explicitly for the right codes here.
		 */
		if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
			err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT:
	case __SI_MESGQ:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
#ifdef __ARCH_SIGSYS
	case __SI_SYS:
		err |= __put_user(from->si_call_addr, &to->si_call_addr);
		err |= __put_user(from->si_syscall, &to->si_syscall);
		err |= __put_user(from->si_arch, &to->si_arch);
		break;
#endif
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}
2802
2803#endif
2804
2805
2806
2807
2808
2809
2810
/**
 * do_sigtimedwait - wait for queued signals specified in @which
 * @which: queued signals to wait for
 * @info: if non-NULL, the signal's siginfo is returned here
 * @ts: upper bound on process time suspension, NULL for unbounded wait
 *
 * Returns the dequeued signal number, -EINTR if interrupted by an
 * unrelated signal, or -EAGAIN on timeout.
 */
int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
			const struct timespec *ts)
{
	struct task_struct *tsk = current;
	long timeout = MAX_SCHEDULE_TIMEOUT;
	sigset_t mask = *which;
	int sig;

	if (ts) {
		if (!timespec_valid(ts))
			return -EINVAL;
		timeout = timespec_to_jiffies(ts);
		/*
		 * We can be close to the next tick, add another one
		 * to ensure we will wait at least the time asked for.
		 */
		if (ts->tv_sec || ts->tv_nsec)
			timeout++;
	}

	/*
	 * Invert the set of allowed signals to get those we want to block.
	 */
	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
	signotset(&mask);

	spin_lock_irq(&tsk->sighand->siglock);
	sig = dequeue_signal(tsk, &mask, info);
	if (!sig && timeout) {
		/*
		 * None ready, temporarily unblock those we're interested
		 * in while we are sleeping so that we'll be awakened when
		 * they arrive.  ->real_blocked keeps the previous mask so
		 * sig_task_ignored()/sig_ignored() still see it.
		 */
		tsk->real_blocked = tsk->blocked;
		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
		recalc_sigpending();
		spin_unlock_irq(&tsk->sighand->siglock);

		timeout = freezable_schedule_timeout_interruptible(timeout);

		spin_lock_irq(&tsk->sighand->siglock);
		__set_task_blocked(tsk, &tsk->real_blocked);
		siginitset(&tsk->real_blocked, 0);
		sig = dequeue_signal(tsk, &mask, info);
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (sig)
		return sig;
	return timeout ? -EINTR : -EAGAIN;
}
2864
2865
2866
2867
2868
2869
2870
2871
2872
/**
 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
 *			in @uthese
 * @uthese: queued signals to wait for
 * @uinfo: if non-NULL, the signal's siginfo is returned here
 * @uts: upper bound on process time suspension
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
		siginfo_t __user *, uinfo, const struct timespec __user *, uts,
		size_t, sigsetsize)
{
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
2903
2904
2905
2906
2907
2908
/**
 * sys_kill - send a signal to a process
 * @pid: the PID of the process
 * @sig: signal to be sent
 */
SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	return kill_something_info(sig, &info, pid);
}
2921
/*
 * Send @sig to the thread with virtual pid @pid; if @tgid > 0, the
 * thread must also belong to that thread group.  sig == 0 acts as a
 * permission and existence probe.
 */
static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
{
	struct task_struct *p;
	int error = -ESRCH;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig) {
			error = do_send_sig_info(sig, info, p, false);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal.  The window is
			 * tiny, and the signal is private anyway.
			 */
			if (unlikely(error == -ESRCH))
				error = 0;
		}
	}
	rcu_read_unlock();

	return error;
}
2951
/*
 * Common backend for tkill(2)/tgkill(2): build an SI_TKILL siginfo
 * identifying the sender and deliver it to a single thread.
 */
static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
	struct siginfo info = {};

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	return do_send_specific(tgid, pid, sig, &info);
}
2964
2965
2966
2967
2968
2969
2970
2971
2972
2973
2974
/**
 * sys_tgkill - send signal to one specific thread
 * @tgid: the thread group ID of the thread
 * @pid: the PID of the thread
 * @sig: signal to be sent
 *
 * This syscall also checks the @tgid and returns -ESRCH even if the
 * PID exists but it's not belonging to the target group.  This avoids
 * signaling an unrelated thread that recycled the exited thread's PID.
 */
SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}
2983
2984
2985
2986
2987
2988
2989
2990
/**
 * sys_tkill - send signal to one specific task
 * @pid: the PID of the task
 * @sig: signal to be sent
 *
 * Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}
2999
/*
 * Deliver user-supplied siginfo to a process.  Forbid forging
 * kernel-origin si_codes (>= 0, except the SI_TKILL special case)
 * toward other processes; signaling oneself is always allowed.
 */
static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
{
	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid)) {
		/* We used to allow any < 0 si_code */
		WARN_ON_ONCE(info->si_code < 0);
		return -EPERM;
	}
	info->si_signo = sig;

	/* POSIX.1b doesn't mention process groups. */
	return kill_proc_info(sig, info, pid);
}
3016
3017
3018
3019
3020
3021
3022
/**
 * sys_rt_sigqueueinfo - send signal information to a signal
 * @pid: the PID of the thread
 * @sig: signal to be sent
 * @uinfo: signal info to be sent
 */
SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;
	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;
	return do_rt_sigqueueinfo(pid, sig, &info);
}
3031
3032#ifdef CONFIG_COMPAT
/* Compat entry point: converts compat_siginfo before delegating. */
COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	siginfo_t info;
	int ret = copy_siginfo_from_user32(&info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_sigqueueinfo(pid, sig, &info);
}
3044#endif
3045
/*
 * Like do_rt_sigqueueinfo() but targets one specific thread of a
 * thread group; same anti-forgery restrictions apply.
 */
static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if (((info->si_code >= 0 || info->si_code == SI_TKILL)) &&
	    (task_pid_vnr(current) != pid)) {
		/* We used to allow any < 0 si_code */
		WARN_ON_ONCE(info->si_code < 0);
		return -EPERM;
	}
	info->si_signo = sig;

	return do_send_specific(tgid, pid, sig, info);
}
3065
/* Queue user-supplied siginfo to one specific thread of @tgid. */
SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
3076
3077#ifdef CONFIG_COMPAT
/* Compat entry point: converts compat_siginfo before delegating. */
COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
			compat_pid_t, tgid,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	siginfo_t info;

	if (copy_siginfo_from_user32(&info, uinfo))
		return -EFAULT;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
3090#endif
3091
3092int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3093{
3094 struct task_struct *t = current;
3095 struct k_sigaction *k;
3096 sigset_t mask;
3097
3098 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3099 return -EINVAL;
3100
3101 k = &t->sighand->action[sig-1];
3102
3103 spin_lock_irq(¤t->sighand->siglock);
3104 if (oact)
3105 *oact = *k;
3106
3107 if (act) {
3108 sigdelsetmask(&act->sa.sa_mask,
3109 sigmask(SIGKILL) | sigmask(SIGSTOP));
3110 *k = *act;
3111
3112
3113
3114
3115
3116
3117
3118
3119
3120
3121
3122 if (sig_handler_ignored(sig_handler(t, sig), sig)) {
3123 sigemptyset(&mask);
3124 sigaddset(&mask, sig);
3125 rm_from_queue_full(&mask, &t->signal->shared_pending);
3126 do {
3127 rm_from_queue_full(&mask, &t->pending);
3128 t = next_thread(t);
3129 } while (t != current);
3130 }
3131 }
3132
3133 spin_unlock_irq(¤t->sighand->siglock);
3134 return 0;
3135}
3136
/*
 * sigaltstack(2) backend.  Reports the old alternate signal stack in
 * *uoss and optionally installs a new one from *uss.  Changing the
 * stack is forbidden (-EPERM) while currently executing on it.
 */
static int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	/* Snapshot the current settings before any change. */
	oss.ss_sp = (void __user *) current->sas_ss_sp;
	oss.ss_size = current->sas_ss_size;
	oss.ss_flags = sas_ss_flags(sp);

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
			goto out;
		error = __get_user(ss_sp, &uss->ss_sp) |
			__get_user(ss_flags, &uss->ss_flags) |
			__get_user(ss_size, &uss->ss_size);
		if (error)
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Only exact SS_DISABLE, SS_ONSTACK or 0 are accepted as
		 * ss_flags here.  (SS_ONSTACK is tolerated on input for
		 * historical/compatibility reasons — presumably; confirm
		 * against the sigaltstack(2) man page if changing this.)
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	error = 0;
	if (uoss) {
		error = -EFAULT;
		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
			goto out;
		error = __put_user(oss.ss_sp, &uoss->ss_sp) |
			__put_user(oss.ss_size, &uoss->ss_size) |
			__put_user(oss.ss_flags, &uoss->ss_flags);
	}

out:
	return error;
}
/* sigaltstack(2): operate on the caller's current user stack pointer. */
SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
{
	return do_sigaltstack(uss, uoss, current_user_stack_pointer());
}
3206
/*
 * Restore the alternate signal stack from a sigreturn frame.  Only
 * -EFAULT is propagated; other validation errors are deliberately
 * squashed so a bogus saved stack can't fail sigreturn.
 */
int restore_altstack(const stack_t __user *uss)
{
	int err = do_sigaltstack(uss, NULL, current_user_stack_pointer());
	/* squash all but EFAULT for now */
	return err == -EFAULT ? err : 0;
}
3213
/* Save the current alternate signal stack state into a user frame. */
int __save_altstack(stack_t __user *uss, unsigned long sp)
{
	struct task_struct *t = current;
	return  __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
		__put_user(sas_ss_flags(sp), &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
}
3221
3222#ifdef CONFIG_COMPAT
/*
 * Compat sigaltstack: converts a compat_stack_t to the native layout,
 * then calls do_sigaltstack() on kernel copies under KERNEL_DS so the
 * native code's user-access helpers operate on them.
 */
COMPAT_SYSCALL_DEFINE2(sigaltstack,
			const compat_stack_t __user *, uss_ptr,
			compat_stack_t __user *, uoss_ptr)
{
	stack_t uss, uoss;
	int ret;
	mm_segment_t seg;

	if (uss_ptr) {
		compat_stack_t uss32;

		memset(&uss, 0, sizeof(stack_t));
		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
			return -EFAULT;
		uss.ss_sp = compat_ptr(uss32.ss_sp);
		uss.ss_flags = uss32.ss_flags;
		uss.ss_size = uss32.ss_size;
	}
	seg = get_fs();
	set_fs(KERNEL_DS);
	ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
			     (stack_t __force __user *) &uoss,
			     compat_user_stack_pointer());
	set_fs(seg);
	/* Convert the result back to the 32-bit layout for the caller. */
	if (ret >= 0 && uoss_ptr)  {
		if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
		    __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
		    __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
		    __put_user(uoss.ss_size, &uoss_ptr->ss_size))
			ret = -EFAULT;
	}
	return ret;
}
3256
3257int compat_restore_altstack(const compat_stack_t __user *uss)
3258{
3259 int err = compat_sys_sigaltstack(uss, NULL);
3260
3261 return err == -EFAULT ? err : 0;
3262}
3263
3264int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
3265{
3266 struct task_struct *t = current;
3267 return __put_user(ptr_to_compat((void __user *)t->sas_ss_sp), &uss->ss_sp) |
3268 __put_user(sas_ss_flags(sp), &uss->ss_flags) |
3269 __put_user(t->sas_ss_size, &uss->ss_size);
3270}
3271#endif
3272
3273#ifdef __ARCH_WANT_SYS_SIGPENDING
3274
3275
3276
3277
3278
/*
 * Legacy sigpending(2): report pending signals through the old
 * single-word sigset.  Implemented on top of rt_sigpending() by
 * asking for only sizeof(old_sigset_t) bytes; the cast relies on
 * the first word of sigset_t matching old_sigset_t's layout.
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
{
	return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t));
}
3283
3284#endif
3285
3286#ifdef __ARCH_WANT_SYS_SIGPROCMASK
3287
3288
3289
3290
3291
3292
3293
3294
3295
3296
3297SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
3298 old_sigset_t __user *, oset)
3299{
3300 old_sigset_t old_set, new_set;
3301 sigset_t new_blocked;
3302
3303 old_set = current->blocked.sig[0];
3304
3305 if (nset) {
3306 if (copy_from_user(&new_set, nset, sizeof(*nset)))
3307 return -EFAULT;
3308
3309 new_blocked = current->blocked;
3310
3311 switch (how) {
3312 case SIG_BLOCK:
3313 sigaddsetmask(&new_blocked, new_set);
3314 break;
3315 case SIG_UNBLOCK:
3316 sigdelsetmask(&new_blocked, new_set);
3317 break;
3318 case SIG_SETMASK:
3319 new_blocked.sig[0] = new_set;
3320 break;
3321 default:
3322 return -EINVAL;
3323 }
3324
3325 set_current_blocked(&new_blocked);
3326 }
3327
3328 if (oset) {
3329 if (copy_to_user(oset, &old_set, sizeof(*oset)))
3330 return -EFAULT;
3331 }
3332
3333 return 0;
3334}
3335#endif
3336
3337#ifndef CONFIG_ODD_RT_SIGACTION
3338
3339
3340
3341
3342
3343
3344
3345SYSCALL_DEFINE4(rt_sigaction, int, sig,
3346 const struct sigaction __user *, act,
3347 struct sigaction __user *, oact,
3348 size_t, sigsetsize)
3349{
3350 struct k_sigaction new_sa, old_sa;
3351 int ret = -EINVAL;
3352
3353
3354 if (sigsetsize != sizeof(sigset_t))
3355 goto out;
3356
3357 if (act) {
3358 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3359 return -EFAULT;
3360 }
3361
3362 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3363
3364 if (!ret && oact) {
3365 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3366 return -EFAULT;
3367 }
3368out:
3369 return ret;
3370}
3371#ifdef CONFIG_COMPAT
/*
 * 32-bit compat rt_sigaction(2): pointers and the signal mask are
 * converted between compat and native layouts on both the install
 * and the report path.
 *
 * NOTE(review): on the @oact path the copy_to_user()/put_user()
 * results are OR-ed straight into the return value, so a partial
 * copy can yield a positive byte count instead of -EFAULT — confirm
 * whether callers care before changing it.
 */
COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct compat_sigaction __user *, act,
		struct compat_sigaction __user *, oact,
		compat_size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	compat_sigset_t mask;
#ifdef __ARCH_HAS_SA_RESTORER
	compat_uptr_t restorer;
#endif
	int ret;

	/* XXX: reject mismatched sigset sizes for now. */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;
		/* Widen the compat handler/restorer pointers. */
		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= get_user(restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
#endif
		ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
		ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
		sigset_from_compat(&new_ka.sa.sa_mask, &mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		/* Report the previous disposition in compat layout. */
		sigset_to_compat(&mask, &old_ka.sa.sa_mask);
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler);
		ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
		ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
				&oact->sa_restorer);
#endif
	}
	return ret;
}
3417#endif
3418#endif
3419
3420#ifdef CONFIG_OLD_SIGACTION
/*
 * Legacy sigaction(2) for architectures still using the old
 * struct old_sigaction with a single-word signal mask.
 */
SYSCALL_DEFINE3(sigaction, int, sig,
		const struct old_sigaction __user *, act,
		struct old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		/* One access_ok() covers the struct; __get_user() skips rechecks. */
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		/* Expand the one-word mask into a full sigset_t. */
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
3455#endif
3456#ifdef CONFIG_COMPAT_OLD_SIGACTION
/*
 * 32-bit compat version of the legacy sigaction(2): same single-word
 * mask layout, but handler/restorer arrive as compat pointers that
 * must be widened via compat_ptr().
 */
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
		const struct compat_old_sigaction __user *, act,
		struct compat_old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (act) {
		/* One access_ok() covers the struct; __get_user() skips rechecks. */
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;

#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
		/* Expand the one-word mask into a full sigset_t. */
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}
	return ret;
}
3496#endif
3497
3498#ifdef __ARCH_WANT_SYS_SGETMASK
3499
3500
3501
3502
/*
 * Legacy sgetmask(2): return the first word of the caller's
 * blocked-signal mask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe: reading our own mask word needs no locking. */
	return current->blocked.sig[0];
}
3508
3509SYSCALL_DEFINE1(ssetmask, int, newmask)
3510{
3511 int old = current->blocked.sig[0];
3512 sigset_t newset;
3513
3514 siginitset(&newset, newmask);
3515 set_current_blocked(&newset);
3516
3517 return old;
3518}
3519#endif
3520
3521#ifdef __ARCH_WANT_SYS_SIGNAL
3522
3523
3524
3525SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
3526{
3527 struct k_sigaction new_sa, old_sa;
3528 int ret;
3529
3530 new_sa.sa.sa_handler = handler;
3531 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
3532 sigemptyset(&new_sa.sa.sa_mask);
3533
3534 ret = do_sigaction(sig, &new_sa, &old_sa);
3535
3536 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3537}
3538#endif
3539
3540#ifdef __ARCH_WANT_SYS_PAUSE
3541
3542SYSCALL_DEFINE0(pause)
3543{
3544 while (!signal_pending(current)) {
3545 current->state = TASK_INTERRUPTIBLE;
3546 schedule();
3547 }
3548 return -ERESTARTNOHAND;
3549}
3550
3551#endif
3552
3553int sigsuspend(sigset_t *set)
3554{
3555 current->saved_sigmask = current->blocked;
3556 set_current_blocked(set);
3557
3558 current->state = TASK_INTERRUPTIBLE;
3559 schedule();
3560 set_restore_sigmask();
3561 return -ERESTARTNOHAND;
3562}
3563
3564
3565
3566
3567
3568
3569
3570SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
3571{
3572 sigset_t newset;
3573
3574
3575 if (sigsetsize != sizeof(sigset_t))
3576 return -EINVAL;
3577
3578 if (copy_from_user(&newset, unewset, sizeof(newset)))
3579 return -EFAULT;
3580 return sigsuspend(&newset);
3581}
3582
3583#ifdef CONFIG_COMPAT
/*
 * 32-bit compat rt_sigsuspend(2).  On big-endian the word order of
 * compat_sigset_t differs from sigset_t, so an explicit conversion
 * is needed; on little-endian the layouts coincide and we can call
 * the native syscall directly with a reinterpreted pointer.
 */
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
{
#ifdef __BIG_ENDIAN
	sigset_t newset;
	compat_sigset_t newset32;

	/* XXX: reject mismatched sigset sizes for now. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
		return -EFAULT;
	sigset_from_compat(&newset, &newset32);
	return sigsuspend(&newset);
#else
	/* Little-endian: compat and native sigset layouts match. */
	return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize);
#endif
}
3603#endif
3604
3605#ifdef CONFIG_OLD_SIGSUSPEND
3606SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
3607{
3608 sigset_t blocked;
3609 siginitset(&blocked, mask);
3610 return sigsuspend(&blocked);
3611}
3612#endif
3613#ifdef CONFIG_OLD_SIGSUSPEND3
3614SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
3615{
3616 sigset_t blocked;
3617 siginitset(&blocked, mask);
3618 return sigsuspend(&blocked);
3619}
3620#endif
3621
/*
 * Weak default for arch_vma_name(): architectures that give special
 * mappings a name override this; generically a vma has none.
 */
__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}
3626
/*
 * Boot-time initialization: create the slab cache backing struct
 * sigqueue.  SLAB_PANIC because signal delivery cannot work
 * without it.
 */
void __init signals_init(void)
{
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}
3631
3632#ifdef CONFIG_KGDB_KDB
3633#include <linux/kdb.h>
3634
3635
3636
3637
3638
3639
/*
 * kdb_send_sig_info - deliver a signal to @t from the kdb debugger.
 *
 * Best-effort: the siglock is only probed (trylock + immediate
 * unlock) to check that it is free; it is NOT held across
 * send_sig_info().  NOTE(review): the lock could be re-taken between
 * the probe and the send — presumably acceptable because kdb stops
 * the other CPUs, but confirm.  If @t is not RUNNING, the first
 * attempt is refused with a warning (sending risks deadlock on run
 * queue locks); reissuing the same kill overrides the refusal.
 */
void
kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
{
	static struct task_struct *kdb_prev_t;
	int sig, new_t;
	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "kernel, try again later\n");
		return;
	}
	spin_unlock(&t->sighand->siglock);
	/* new_t: is this a different target than the previous kill? */
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (t->state != TASK_RUNNING && new_t) {
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	sig = info->si_signo;
	if (send_sig_info(sig, info, t))
		kdb_printf("Fail to deliver Signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
}
3670#endif
3671