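/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Kernel signal handling: generation, queueing and delivery of signals,
 *  job control, and the signal-related system calls.
 */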
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"
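/*
 * SLAB cache for sigqueue structures.
 */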
52static struct kmem_cache *sigqueue_cachep;
53
54int print_fatal_signals __read_mostly;
55
56static void __user *sig_handler(struct task_struct *t, int sig)
57{
58 return t->sighand->action[sig - 1].sa.sa_handler;
59}
60
61static int sig_handler_ignored(void __user *handler, int sig)
62{
63
64 return handler == SIG_IGN ||
65 (handler == SIG_DFL && sig_kernel_ignore(sig));
66}
67
68static int sig_task_ignored(struct task_struct *t, int sig, bool force)
69{
70 void __user *handler;
71
72 handler = sig_handler(t, sig);
73
74 if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
75 handler == SIG_DFL && !force)
76 return 1;
77
78 return sig_handler_ignored(handler, sig);
79}
80
81static int sig_ignored(struct task_struct *t, int sig, bool force)
82{
83
84
85
86
87
88 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
89 return 0;
90
91 if (!sig_task_ignored(t, sig, force))
92 return 0;
93
94
95
96
97 return !t->ptrace;
98}
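/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */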
104static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
105{
106 unsigned long ready;
107 long i;
108
109 switch (_NSIG_WORDS) {
110 default:
111 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
112 ready |= signal->sig[i] &~ blocked->sig[i];
113 break;
114
115 case 4: ready = signal->sig[3] &~ blocked->sig[3];
116 ready |= signal->sig[2] &~ blocked->sig[2];
117 ready |= signal->sig[1] &~ blocked->sig[1];
118 ready |= signal->sig[0] &~ blocked->sig[0];
119 break;
120
121 case 2: ready = signal->sig[1] &~ blocked->sig[1];
122 ready |= signal->sig[0] &~ blocked->sig[0];
123 break;
124
125 case 1: ready = signal->sig[0] &~ blocked->sig[0];
126 }
127 return ready != 0;
128}
129
130#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
131
132static int recalc_sigpending_tsk(struct task_struct *t)
133{
134 if ((t->jobctl & JOBCTL_PENDING_MASK) ||
135 PENDING(&t->pending, &t->blocked) ||
136 PENDING(&t->signal->shared_pending, &t->blocked)) {
137 set_tsk_thread_flag(t, TIF_SIGPENDING);
138 return 1;
139 }
140
141
142
143
144
145 return 0;
146}
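/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 */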
152void recalc_sigpending_and_wake(struct task_struct *t)
153{
154 if (recalc_sigpending_tsk(t))
155 signal_wake_up(t, 0);
156}
157
158void recalc_sigpending(void)
159{
160 if (!recalc_sigpending_tsk(current) && !freezing(current))
161 clear_thread_flag(TIF_SIGPENDING);
162
163}
164
165
166
167#define SYNCHRONOUS_MASK \
168 (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
169 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
170
171int next_signal(struct sigpending *pending, sigset_t *mask)
172{
173 unsigned long i, *s, *m, x;
174 int sig = 0;
175
176 s = pending->signal.sig;
177 m = mask->sig;
178
179
180
181
182
183 x = *s &~ *m;
184 if (x) {
185 if (x & SYNCHRONOUS_MASK)
186 x &= SYNCHRONOUS_MASK;
187 sig = ffz(~x) + 1;
188 return sig;
189 }
190
191 switch (_NSIG_WORDS) {
192 default:
193 for (i = 1; i < _NSIG_WORDS; ++i) {
194 x = *++s &~ *++m;
195 if (!x)
196 continue;
197 sig = ffz(~x) + i*_NSIG_BPW + 1;
198 break;
199 }
200 break;
201
202 case 2:
203 x = s[1] &~ m[1];
204 if (!x)
205 break;
206 sig = ffz(~x) + _NSIG_BPW + 1;
207 break;
208
209 case 1:
210
211 break;
212 }
213
214 return sig;
215}
216
217static inline void print_dropped_signal(int sig)
218{
219 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
220
221 if (!print_fatal_signals)
222 return;
223
224 if (!__ratelimit(&ratelimit_state))
225 return;
226
227 printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
228 current->comm, current->pid, sig);
229}
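/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask bits in @task->jobctl.  If a new stop signal is being
 * requested, the previous one in %JOBCTL_STOP_SIGMASK is cleared first.
 * The request is ignored if @task is already dying (fatal signal pending
 * or %PF_EXITING set).
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */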
248bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask)
249{
250 BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
251 JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
252 BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
253
254 if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
255 return false;
256
257 if (mask & JOBCTL_STOP_SIGMASK)
258 task->jobctl &= ~JOBCTL_STOP_SIGMASK;
259
260 task->jobctl |= mask;
261 return true;
262}
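/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If %JOBCTL_TRAPPING is set, a ptracer is waiting for @task to reach the
 * trap.  Clear the bit and wake up anyone waiting on it.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */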
276void task_clear_jobctl_trapping(struct task_struct *task)
277{
278 if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
279 task->jobctl &= ~JOBCTL_TRAPPING;
280 smp_mb();
281 wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
282 }
283}
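/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, the
 * other STOP bits are cleared together.  If clearing @mask leaves no
 * pending bit set, the trapping bit is cleared too.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */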
300void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask)
301{
302 BUG_ON(mask & ~JOBCTL_PENDING_MASK);
303
304 if (mask & JOBCTL_STOP_PENDING)
305 mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
306
307 task->jobctl &= ~mask;
308
309 if (!(task->jobctl & JOBCTL_PENDING_MASK))
310 task_clear_jobctl_trapping(task);
311}
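/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * The pending state is cleared and, if %JOBCTL_STOP_CONSUME was set, one
 * count is consumed from ->group_stop_count.  When the count reaches zero,
 * %SIGNAL_STOP_STOPPED is set to mark the group stop complete.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if this task completed the group stop and the parent should be
 * notified, %false otherwise.
 */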
329static bool task_participate_group_stop(struct task_struct *task)
330{
331 struct signal_struct *sig = task->signal;
332 bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
333
334 WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
335
336 task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
337
338 if (!consume)
339 return false;
340
341 if (!WARN_ON_ONCE(sig->group_stop_count == 0))
342 sig->group_stop_count--;
343
344
345
346
347
348 if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
349 sig->flags = SIGNAL_STOP_STOPPED;
350 return true;
351 }
352 return false;
353}
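/*
 * Allocate a new signal queue record.  The caller must ensure that @t
 * cannot go away; credentials are looked up under RCU and the allocation
 * is charged against the user's RLIMIT_SIGPENDING unless @override_rlimit
 * is set.
 */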
360static struct sigqueue *
361__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
362{
363 struct sigqueue *q = NULL;
364 struct user_struct *user;
365
366
367
368
369
370 rcu_read_lock();
371 user = get_uid(__task_cred(t)->user);
372 atomic_inc(&user->sigpending);
373 rcu_read_unlock();
374
375 if (override_rlimit ||
376 atomic_read(&user->sigpending) <=
377 task_rlimit(t, RLIMIT_SIGPENDING)) {
378 q = kmem_cache_alloc(sigqueue_cachep, flags);
379 } else {
380 print_dropped_signal(sig);
381 }
382
383 if (unlikely(q == NULL)) {
384 atomic_dec(&user->sigpending);
385 free_uid(user);
386 } else {
387 INIT_LIST_HEAD(&q->list);
388 q->flags = 0;
389 q->user = user;
390 }
391
392 return q;
393}
394
395static void __sigqueue_free(struct sigqueue *q)
396{
397 if (q->flags & SIGQUEUE_PREALLOC)
398 return;
399 atomic_dec(&q->user->sigpending);
400 free_uid(q->user);
401 kmem_cache_free(sigqueue_cachep, q);
402}
403
404void flush_sigqueue(struct sigpending *queue)
405{
406 struct sigqueue *q;
407
408 sigemptyset(&queue->signal);
409 while (!list_empty(&queue->list)) {
410 q = list_entry(queue->list.next, struct sigqueue , list);
411 list_del_init(&q->list);
412 __sigqueue_free(q);
413 }
414}
415
416
417
418
419void __flush_signals(struct task_struct *t)
420{
421 clear_tsk_thread_flag(t, TIF_SIGPENDING);
422 flush_sigqueue(&t->pending);
423 flush_sigqueue(&t->signal->shared_pending);
424}
425
426void flush_signals(struct task_struct *t)
427{
428 unsigned long flags;
429
430 spin_lock_irqsave(&t->sighand->siglock, flags);
431 __flush_signals(t);
432 spin_unlock_irqrestore(&t->sighand->siglock, flags);
433}
434
435static void __flush_itimer_signals(struct sigpending *pending)
436{
437 sigset_t signal, retain;
438 struct sigqueue *q, *n;
439
440 signal = pending->signal;
441 sigemptyset(&retain);
442
443 list_for_each_entry_safe(q, n, &pending->list, list) {
444 int sig = q->info.si_signo;
445
446 if (likely(q->info.si_code != SI_TIMER)) {
447 sigaddset(&retain, sig);
448 } else {
449 sigdelset(&signal, sig);
450 list_del_init(&q->list);
451 __sigqueue_free(q);
452 }
453 }
454
455 sigorsets(&pending->signal, &signal, &retain);
456}
457
458void flush_itimer_signals(void)
459{
460 struct task_struct *tsk = current;
461 unsigned long flags;
462
463 spin_lock_irqsave(&tsk->sighand->siglock, flags);
464 __flush_itimer_signals(&tsk->pending);
465 __flush_itimer_signals(&tsk->signal->shared_pending);
466 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
467}
468
469void ignore_signals(struct task_struct *t)
470{
471 int i;
472
473 for (i = 0; i < _NSIG; ++i)
474 t->sighand->action[i].sa.sa_handler = SIG_IGN;
475
476 flush_signals(t);
477}
478
479
480
481
482
483void
484flush_signal_handlers(struct task_struct *t, int force_default)
485{
486 int i;
487 struct k_sigaction *ka = &t->sighand->action[0];
488 for (i = _NSIG ; i != 0 ; i--) {
489 if (force_default || ka->sa.sa_handler != SIG_IGN)
490 ka->sa.sa_handler = SIG_DFL;
491 ka->sa.sa_flags = 0;
492#ifdef __ARCH_HAS_SA_RESTORER
493 ka->sa.sa_restorer = NULL;
494#endif
495 sigemptyset(&ka->sa.sa_mask);
496 ka++;
497 }
498}
499
500int unhandled_signal(struct task_struct *tsk, int sig)
501{
502 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
503 if (is_global_init(tsk))
504 return 1;
505 if (handler != SIG_IGN && handler != SIG_DFL)
506 return 0;
507
508 return !tsk->ptrace;
509}
510
511
512
513
514
515
516
517
518
519
520void
521block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
522{
523 unsigned long flags;
524
	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
530}
531
532
533
534void
535unblock_all_signals(void)
536{
537 unsigned long flags;
538
	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
544}
545
546static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
547{
548 struct sigqueue *q, *first = NULL;
549
550
551
552
553
554 list_for_each_entry(q, &list->list, list) {
555 if (q->info.si_signo == sig) {
556 if (first)
557 goto still_pending;
558 first = q;
559 }
560 }
561
562 sigdelset(&list->signal, sig);
563
564 if (first) {
565still_pending:
566 list_del_init(&first->list);
567 copy_siginfo(info, &first->info);
568 __sigqueue_free(first);
569 } else {
570
571
572
573
574
575 info->si_signo = sig;
576 info->si_errno = 0;
577 info->si_code = SI_USER;
578 info->si_pid = 0;
579 info->si_uid = 0;
580 }
581}
582
583static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
584 siginfo_t *info)
585{
586 int sig = next_signal(pending, mask);
587
588 if (sig) {
589 if (current->notifier) {
590 if (sigismember(current->notifier_mask, sig)) {
591 if (!(current->notifier)(current->notifier_data)) {
592 clear_thread_flag(TIF_SIGPENDING);
593 return 0;
594 }
595 }
596 }
597
598 collect_signal(sig, pending, info);
599 }
600
601 return sig;
602}
603
604
605
606
607
608
609
610int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
611{
612 int signr;
613
614
615
616
617 signr = __dequeue_signal(&tsk->pending, mask, info);
618 if (!signr) {
619 signr = __dequeue_signal(&tsk->signal->shared_pending,
620 mask, info);
621
622
623
624
625
626
627
628
629
630
631
632
633
634 if (unlikely(signr == SIGALRM)) {
635 struct hrtimer *tmr = &tsk->signal->real_timer;
636
637 if (!hrtimer_is_queued(tmr) &&
638 tsk->signal->it_real_incr.tv64 != 0) {
639 hrtimer_forward(tmr, tmr->base->get_time(),
640 tsk->signal->it_real_incr);
641 hrtimer_restart(tmr);
642 }
643 }
644 }
645
646 recalc_sigpending();
647 if (!signr)
648 return 0;
649
650 if (unlikely(sig_kernel_stop(signr))) {
651
652
653
654
655
656
657
658
659
660
661
662
663 current->jobctl |= JOBCTL_STOP_DEQUEUED;
664 }
665 if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
666
667
668
669
670
671
672 spin_unlock(&tsk->sighand->siglock);
673 do_schedule_next_timer(info);
674 spin_lock(&tsk->sighand->siglock);
675 }
676 return signr;
677}
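/*
 * Tell a process that it has a new active signal: set TIF_SIGPENDING and
 * wake it from an interruptible sleep, plus any extra sleep state in
 * @state (used for fatal signals and ptrace wakeups).  If the task was not
 * actually woken, kick it so that a task running on another CPU notices
 * TIF_SIGPENDING promptly.
 */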
690void signal_wake_up_state(struct task_struct *t, unsigned int state)
691{
692 set_tsk_thread_flag(t, TIF_SIGPENDING);
693
694
695
696
697
698
699
700 if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
701 kick_process(t);
702}
703
704
705
706
707
708
709
710static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
711{
712 struct sigqueue *q, *n;
713 sigset_t m;
714
715 sigandsets(&m, mask, &s->signal);
716 if (sigisemptyset(&m))
717 return 0;
718
719 sigandnsets(&s->signal, &s->signal, mask);
720 list_for_each_entry_safe(q, n, &s->list, list) {
721 if (sigismember(mask, q->info.si_signo)) {
722 list_del_init(&q->list);
723 __sigqueue_free(q);
724 }
725 }
726 return 1;
727}
728
729static inline int is_si_special(const struct siginfo *info)
730{
731 return info <= SEND_SIG_FORCED;
732}
733
734static inline bool si_fromuser(const struct siginfo *info)
735{
736 return info == SEND_SIG_NOINFO ||
737 (!is_si_special(info) && SI_FROMUSER(info));
738}
739
740
741
742
743static int kill_ok_by_cred(struct task_struct *t)
744{
745 const struct cred *cred = current_cred();
746 const struct cred *tcred = __task_cred(t);
747
748 if (uid_eq(cred->euid, tcred->suid) ||
749 uid_eq(cred->euid, tcred->uid) ||
750 uid_eq(cred->uid, tcred->suid) ||
751 uid_eq(cred->uid, tcred->uid))
752 return 1;
753
754 if (ns_capable(tcred->user_ns, CAP_KILL))
755 return 1;
756
757 return 0;
758}
759
760
761
762
763
764static int check_kill_permission(int sig, struct siginfo *info,
765 struct task_struct *t)
766{
767 struct pid *sid;
768 int error;
769
770 if (!valid_signal(sig))
771 return -EINVAL;
772
773 if (!si_fromuser(info))
774 return 0;
775
776 error = audit_signal_info(sig, t);
777 if (error)
778 return error;
779
780 if (!same_thread_group(current, t) &&
781 !kill_ok_by_cred(t)) {
782 switch (sig) {
783 case SIGCONT:
784 sid = task_session(t);
785
786
787
788
789 if (!sid || sid == task_session(current))
790 break;
791 default:
792 return -EPERM;
793 }
794 }
795
796 return security_task_kill(t, info, sig, 0);
797}
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816static void ptrace_trap_notify(struct task_struct *t)
817{
818 WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
819 assert_spin_locked(&t->sighand->siglock);
820
821 task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
822 ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
823}
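/*
 * Handle magic semantics of certain signals before queueing:
 * stop signals flush any pending SIGCONT, SIGCONT flushes pending stop
 * signals and wakes up stopped threads, and a group that is dumping core
 * only accepts SIGKILL.  Returns true if the signal should actually be
 * queued, i.e. it is not ignored by the target.
 */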
835static bool prepare_signal(int sig, struct task_struct *p, bool force)
836{
837 struct signal_struct *signal = p->signal;
838 struct task_struct *t;
839 sigset_t flush;
840
841 if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
842 if (signal->flags & SIGNAL_GROUP_COREDUMP)
843 return sig == SIGKILL;
844
845
846
847 } else if (sig_kernel_stop(sig)) {
848
849
850
851 siginitset(&flush, sigmask(SIGCONT));
852 flush_sigqueue_mask(&flush, &signal->shared_pending);
853 for_each_thread(p, t)
854 flush_sigqueue_mask(&flush, &t->pending);
855 } else if (sig == SIGCONT) {
856 unsigned int why;
857
858
859
860 siginitset(&flush, SIG_KERNEL_STOP_MASK);
861 flush_sigqueue_mask(&flush, &signal->shared_pending);
862 for_each_thread(p, t) {
863 flush_sigqueue_mask(&flush, &t->pending);
864 task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
865 if (likely(!(t->ptrace & PT_SEIZED)))
866 wake_up_state(t, __TASK_STOPPED);
867 else
868 ptrace_trap_notify(t);
869 }
870
871
872
873
874
875
876
877
878
879 why = 0;
880 if (signal->flags & SIGNAL_STOP_STOPPED)
881 why |= SIGNAL_CLD_CONTINUED;
882 else if (signal->group_stop_count)
883 why |= SIGNAL_CLD_STOPPED;
884
885 if (why) {
886
887
888
889
890
891 signal->flags = why | SIGNAL_STOP_CONTINUED;
892 signal->group_stop_count = 0;
893 signal->group_exit_code = 0;
894 }
895 }
896
897 return !sig_ignored(p, sig, force);
898}
899
900
901
902
903
904
905
906
907
908static inline int wants_signal(int sig, struct task_struct *p)
909{
910 if (sigismember(&p->blocked, sig))
911 return 0;
912 if (p->flags & PF_EXITING)
913 return 0;
914 if (sig == SIGKILL)
915 return 1;
916 if (task_is_stopped_or_traced(p))
917 return 0;
918 return task_curr(p) || !signal_pending(p);
919}
920
921static void complete_signal(int sig, struct task_struct *p, int group)
922{
923 struct signal_struct *signal = p->signal;
924 struct task_struct *t;
925
926
927
928
929
930
931
932 if (wants_signal(sig, p))
933 t = p;
934 else if (!group || thread_group_empty(p))
935
936
937
938
939 return;
940 else {
941
942
943
944 t = signal->curr_target;
945 while (!wants_signal(sig, t)) {
946 t = next_thread(t);
947 if (t == signal->curr_target)
948
949
950
951
952
953 return;
954 }
955 signal->curr_target = t;
956 }
957
958
959
960
961
962 if (sig_fatal(p, sig) &&
963 !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
964 !sigismember(&t->real_blocked, sig) &&
965 (sig == SIGKILL || !t->ptrace)) {
966
967
968
969 if (!sig_kernel_coredump(sig)) {
970
971
972
973
974
975
976 signal->flags = SIGNAL_GROUP_EXIT;
977 signal->group_exit_code = sig;
978 signal->group_stop_count = 0;
979 t = p;
980 do {
981 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
982 sigaddset(&t->pending.signal, SIGKILL);
983 signal_wake_up(t, 1);
984 } while_each_thread(p, t);
985 return;
986 }
987 }
988
989
990
991
992
993 signal_wake_up(t, sig == SIGKILL);
994 return;
995}
996
997static inline int legacy_queue(struct sigpending *signals, int sig)
998{
999 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1000}
1001
1002#ifdef CONFIG_USER_NS
1003static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
1004{
1005 if (current_user_ns() == task_cred_xxx(t, user_ns))
1006 return;
1007
1008 if (SI_FROMKERNEL(info))
1009 return;
1010
1011 rcu_read_lock();
1012 info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
1013 make_kuid(current_user_ns(), info->si_uid));
1014 rcu_read_unlock();
1015}
1016#else
1017static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
1018{
1019 return;
1020}
1021#endif
1022
1023static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
1024 int group, int from_ancestor_ns)
1025{
1026 struct sigpending *pending;
1027 struct sigqueue *q;
1028 int override_rlimit;
1029 int ret = 0, result;
1030
1031 assert_spin_locked(&t->sighand->siglock);
1032
1033 result = TRACE_SIGNAL_IGNORED;
1034 if (!prepare_signal(sig, t,
1035 from_ancestor_ns || (info == SEND_SIG_FORCED)))
1036 goto ret;
1037
1038 pending = group ? &t->signal->shared_pending : &t->pending;
1039
1040
1041
1042
1043
1044 result = TRACE_SIGNAL_ALREADY_PENDING;
1045 if (legacy_queue(pending, sig))
1046 goto ret;
1047
1048 result = TRACE_SIGNAL_DELIVERED;
1049
1050
1051
1052
1053 if (info == SEND_SIG_FORCED)
1054 goto out_set;
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065 if (sig < SIGRTMIN)
1066 override_rlimit = (is_si_special(info) || info->si_code >= 0);
1067 else
1068 override_rlimit = 0;
1069
1070 q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
1071 override_rlimit);
1072 if (q) {
1073 list_add_tail(&q->list, &pending->list);
1074 switch ((unsigned long) info) {
1075 case (unsigned long) SEND_SIG_NOINFO:
1076 q->info.si_signo = sig;
1077 q->info.si_errno = 0;
1078 q->info.si_code = SI_USER;
1079 q->info.si_pid = task_tgid_nr_ns(current,
1080 task_active_pid_ns(t));
1081 q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
1082 break;
1083 case (unsigned long) SEND_SIG_PRIV:
1084 q->info.si_signo = sig;
1085 q->info.si_errno = 0;
1086 q->info.si_code = SI_KERNEL;
1087 q->info.si_pid = 0;
1088 q->info.si_uid = 0;
1089 break;
1090 default:
1091 copy_siginfo(&q->info, info);
1092 if (from_ancestor_ns)
1093 q->info.si_pid = 0;
1094 break;
1095 }
1096
1097 userns_fixup_signal_uid(&q->info, t);
1098
1099 } else if (!is_si_special(info)) {
1100 if (sig >= SIGRTMIN && info->si_code != SI_USER) {
1101
1102
1103
1104
1105
1106 result = TRACE_SIGNAL_OVERFLOW_FAIL;
1107 ret = -EAGAIN;
1108 goto ret;
1109 } else {
1110
1111
1112
1113
1114 result = TRACE_SIGNAL_LOSE_INFO;
1115 }
1116 }
1117
1118out_set:
1119 signalfd_notify(t, sig);
1120 sigaddset(&pending->signal, sig);
1121 complete_signal(sig, t, group);
1122ret:
1123 trace_signal_generate(sig, info, t, group, result);
1124 return ret;
1125}
1126
1127static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
1128 int group)
1129{
1130 int from_ancestor_ns = 0;
1131
1132#ifdef CONFIG_PID_NS
1133 from_ancestor_ns = si_fromuser(info) &&
1134 !task_pid_nr_ns(current, task_active_pid_ns(t));
1135#endif
1136
1137 return __send_signal(sig, info, t, group, from_ancestor_ns);
1138}
1139
1140static void print_fatal_signal(int signr)
1141{
1142 struct pt_regs *regs = signal_pt_regs();
1143 printk(KERN_INFO "potentially unexpected fatal signal %d.\n", signr);
1144
1145#if defined(__i386__) && !defined(__arch_um__)
1146 printk(KERN_INFO "code at %08lx: ", regs->ip);
1147 {
1148 int i;
1149 for (i = 0; i < 16; i++) {
1150 unsigned char insn;
1151
1152 if (get_user(insn, (unsigned char *)(regs->ip + i)))
1153 break;
1154 printk(KERN_CONT "%02x ", insn);
1155 }
1156 }
1157 printk(KERN_CONT "\n");
1158#endif
1159 preempt_disable();
1160 show_regs(regs);
1161 preempt_enable();
1162}
1163
1164static int __init setup_print_fatal_signals(char *str)
1165{
1166 get_option (&str, &print_fatal_signals);
1167
1168 return 1;
1169}
1170
1171__setup("print-fatal-signals=", setup_print_fatal_signals);
1172
1173int
1174__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1175{
1176 return send_signal(sig, info, p, 1);
1177}
1178
1179static int
1180specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1181{
1182 return send_signal(sig, info, t, 0);
1183}
1184
1185int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1186 bool group)
1187{
1188 unsigned long flags;
1189 int ret = -ESRCH;
1190
1191 if (lock_task_sighand(p, &flags)) {
1192 ret = send_signal(sig, info, p, group);
1193 unlock_task_sighand(p, &flags);
1194 }
1195
1196 return ret;
1197}
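/*
 * Force a signal that the process can't ignore: if necessary we unblock
 * the signal and change any SIG_IGN to SIG_DFL.  When the default action
 * will apply, SIGNAL_UNKILLABLE is cleared as well so that even an
 * init-like task takes the signal.
 */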
1210int
1211force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1212{
1213 unsigned long int flags;
1214 int ret, blocked, ignored;
1215 struct k_sigaction *action;
1216
1217 spin_lock_irqsave(&t->sighand->siglock, flags);
1218 action = &t->sighand->action[sig-1];
1219 ignored = action->sa.sa_handler == SIG_IGN;
1220 blocked = sigismember(&t->blocked, sig);
1221 if (blocked || ignored) {
1222 action->sa.sa_handler = SIG_DFL;
1223 if (blocked) {
1224 sigdelset(&t->blocked, sig);
1225 recalc_sigpending_and_wake(t);
1226 }
1227 }
1228 if (action->sa.sa_handler == SIG_DFL)
1229 t->signal->flags &= ~SIGNAL_UNKILLABLE;
1230 ret = specific_send_sig_info(sig, info, t);
1231 spin_unlock_irqrestore(&t->sighand->siglock, flags);
1232
1233 return ret;
1234}
1235
1236
1237
1238
1239int zap_other_threads(struct task_struct *p)
1240{
1241 struct task_struct *t = p;
1242 int count = 0;
1243
1244 p->signal->group_stop_count = 0;
1245
1246 while_each_thread(p, t) {
1247 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1248 count++;
1249
1250
1251 if (t->exit_state)
1252 continue;
1253 sigaddset(&t->pending.signal, SIGKILL);
1254 signal_wake_up(t, 1);
1255 }
1256
1257 return count;
1258}
1259
1260struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1261 unsigned long *flags)
1262{
1263 struct sighand_struct *sighand;
1264
1265 for (;;) {
1266
1267
1268
1269
1270 local_irq_save(*flags);
1271 rcu_read_lock();
1272 sighand = rcu_dereference(tsk->sighand);
1273 if (unlikely(sighand == NULL)) {
1274 rcu_read_unlock();
1275 local_irq_restore(*flags);
1276 break;
1277 }
1278
1279 spin_lock(&sighand->siglock);
1280 if (likely(sighand == tsk->sighand)) {
1281 rcu_read_unlock();
1282 break;
1283 }
1284 spin_unlock(&sighand->siglock);
1285 rcu_read_unlock();
1286 local_irq_restore(*flags);
1287 }
1288
1289 return sighand;
1290}
1291
1292
1293
1294
1295int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1296{
1297 int ret;
1298
1299 rcu_read_lock();
1300 ret = check_kill_permission(sig, info, p);
1301 rcu_read_unlock();
1302
1303 if (!ret && sig)
1304 ret = do_send_sig_info(sig, info, p, true);
1305
1306 return ret;
1307}
1308
1309
1310
1311
1312
1313
1314int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1315{
1316 struct task_struct *p = NULL;
1317 int retval, success;
1318
1319 success = 0;
1320 retval = -ESRCH;
1321 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1322 int err = group_send_sig_info(sig, info, p);
1323 success |= !err;
1324 retval = err;
1325 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1326 return success ? 0 : retval;
1327}
1328
1329int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1330{
1331 int error = -ESRCH;
1332 struct task_struct *p;
1333
1334 rcu_read_lock();
1335retry:
1336 p = pid_task(pid, PIDTYPE_PID);
1337 if (p) {
1338 error = group_send_sig_info(sig, info, p);
1339 if (unlikely(error == -ESRCH))
1340
1341
1342
1343
1344
1345
1346 goto retry;
1347 }
1348 rcu_read_unlock();
1349
1350 return error;
1351}
1352
1353int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1354{
1355 int error;
1356 rcu_read_lock();
1357 error = kill_pid_info(sig, info, find_vpid(pid));
1358 rcu_read_unlock();
1359 return error;
1360}
1361
1362static int kill_as_cred_perm(const struct cred *cred,
1363 struct task_struct *target)
1364{
1365 const struct cred *pcred = __task_cred(target);
1366 if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
1367 !uid_eq(cred->uid, pcred->suid) && !uid_eq(cred->uid, pcred->uid))
1368 return 0;
1369 return 1;
1370}
1371
1372
1373int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
1374 const struct cred *cred, u32 secid)
1375{
1376 int ret = -EINVAL;
1377 struct task_struct *p;
1378 unsigned long flags;
1379
1380 if (!valid_signal(sig))
1381 return ret;
1382
1383 rcu_read_lock();
1384 p = pid_task(pid, PIDTYPE_PID);
1385 if (!p) {
1386 ret = -ESRCH;
1387 goto out_unlock;
1388 }
1389 if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
1390 ret = -EPERM;
1391 goto out_unlock;
1392 }
1393 ret = security_task_kill(p, info, sig, secid);
1394 if (ret)
1395 goto out_unlock;
1396
1397 if (sig) {
1398 if (lock_task_sighand(p, &flags)) {
1399 ret = __send_signal(sig, info, p, 1, 0);
1400 unlock_task_sighand(p, &flags);
1401 } else
1402 ret = -ESRCH;
1403 }
1404out_unlock:
1405 rcu_read_unlock();
1406 return ret;
1407}
1408EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
1409
1410
1411
1412
1413
1414
1415
1416
1417static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1418{
1419 int ret;
1420
1421 if (pid > 0) {
1422 rcu_read_lock();
1423 ret = kill_pid_info(sig, info, find_vpid(pid));
1424 rcu_read_unlock();
1425 return ret;
1426 }
1427
1428 read_lock(&tasklist_lock);
1429 if (pid != -1) {
1430 ret = __kill_pgrp_info(sig, info,
1431 pid ? find_vpid(-pid) : task_pgrp(current));
1432 } else {
1433 int retval = 0, count = 0;
1434 struct task_struct * p;
1435
1436 for_each_process(p) {
1437 if (task_pid_vnr(p) > 1 &&
1438 !same_thread_group(p, current)) {
1439 int err = group_send_sig_info(sig, info, p);
1440 ++count;
1441 if (err != -EPERM)
1442 retval = err;
1443 }
1444 }
1445 ret = count ? retval : -ESRCH;
1446 }
1447 read_unlock(&tasklist_lock);
1448
1449 return ret;
1450}
1451
1452
1453
1454
1455
1456int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1457{
1458
1459
1460
1461
1462 if (!valid_signal(sig))
1463 return -EINVAL;
1464
1465 return do_send_sig_info(sig, info, p, false);
1466}
1467
1468#define __si_special(priv) \
1469 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1470
1471int
1472send_sig(int sig, struct task_struct *p, int priv)
1473{
1474 return send_sig_info(sig, __si_special(priv), p);
1475}
1476
1477void
1478force_sig(int sig, struct task_struct *p)
1479{
1480 force_sig_info(sig, SEND_SIG_PRIV, p);
1481}
1482
1483
1484
1485
1486
1487
1488
1489int
1490force_sigsegv(int sig, struct task_struct *p)
1491{
1492 if (sig == SIGSEGV) {
1493 unsigned long flags;
1494 spin_lock_irqsave(&p->sighand->siglock, flags);
1495 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1496 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1497 }
1498 force_sig(SIGSEGV, p);
1499 return 0;
1500}
1501
1502int kill_pgrp(struct pid *pid, int sig, int priv)
1503{
1504 int ret;
1505
1506 read_lock(&tasklist_lock);
1507 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1508 read_unlock(&tasklist_lock);
1509
1510 return ret;
1511}
1512EXPORT_SYMBOL(kill_pgrp);
1513
1514int kill_pid(struct pid *pid, int sig, int priv)
1515{
1516 return kill_pid_info(sig, __si_special(priv), pid);
1517}
1518EXPORT_SYMBOL(kill_pid);
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529struct sigqueue *sigqueue_alloc(void)
1530{
1531 struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1532
1533 if (q)
1534 q->flags |= SIGQUEUE_PREALLOC;
1535
1536 return q;
1537}
1538
1539void sigqueue_free(struct sigqueue *q)
1540{
1541 unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;
1543
1544 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1545
1546
1547
1548
1549
1550 spin_lock_irqsave(lock, flags);
1551 q->flags &= ~SIGQUEUE_PREALLOC;
1552
1553
1554
1555
1556 if (!list_empty(&q->list))
1557 q = NULL;
1558 spin_unlock_irqrestore(lock, flags);
1559
1560 if (q)
1561 __sigqueue_free(q);
1562}
1563
1564int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1565{
1566 int sig = q->info.si_signo;
1567 struct sigpending *pending;
1568 unsigned long flags;
1569 int ret, result;
1570
1571 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1572
1573 ret = -1;
1574 if (!likely(lock_task_sighand(t, &flags)))
1575 goto ret;
1576
1577 ret = 1;
1578 result = TRACE_SIGNAL_IGNORED;
1579 if (!prepare_signal(sig, t, false))
1580 goto out;
1581
1582 ret = 0;
1583 if (unlikely(!list_empty(&q->list))) {
1584
1585
1586
1587
1588 BUG_ON(q->info.si_code != SI_TIMER);
1589 q->info.si_overrun++;
1590 result = TRACE_SIGNAL_ALREADY_PENDING;
1591 goto out;
1592 }
1593 q->info.si_overrun = 0;
1594
1595 signalfd_notify(t, sig);
1596 pending = group ? &t->signal->shared_pending : &t->pending;
1597 list_add_tail(&q->list, &pending->list);
1598 sigaddset(&pending->signal, sig);
1599 complete_signal(sig, t, group);
1600 result = TRACE_SIGNAL_DELIVERED;
1601out:
1602 trace_signal_generate(sig, &q->info, t, group, result);
1603 unlock_task_sighand(t, &flags);
1604ret:
1605 return ret;
1606}
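/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */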
1615bool do_notify_parent(struct task_struct *tsk, int sig)
1616{
1617 struct siginfo info;
1618 unsigned long flags;
1619 struct sighand_struct *psig;
1620 bool autoreap = false;
1621 cputime_t utime, stime;
1622
1623 BUG_ON(sig == -1);
1624
1625
1626 BUG_ON(task_is_stopped_or_traced(tsk));
1627
1628 BUG_ON(!tsk->ptrace &&
1629 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1630
1631 if (sig != SIGCHLD) {
1632
1633
1634
1635
1636 if (tsk->parent_exec_id != tsk->parent->self_exec_id)
1637 sig = SIGCHLD;
1638 }
1639
1640 info.si_signo = sig;
1641 info.si_errno = 0;
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653 rcu_read_lock();
1654 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
1655 info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
1656 task_uid(tsk));
1657 rcu_read_unlock();
1658
1659 task_cputime(tsk, &utime, &stime);
1660 info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime);
1661 info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime);
1662
1663 info.si_status = tsk->exit_code & 0x7f;
1664 if (tsk->exit_code & 0x80)
1665 info.si_code = CLD_DUMPED;
1666 else if (tsk->exit_code & 0x7f)
1667 info.si_code = CLD_KILLED;
1668 else {
1669 info.si_code = CLD_EXITED;
1670 info.si_status = tsk->exit_code >> 8;
1671 }
1672
1673 psig = tsk->parent->sighand;
1674 spin_lock_irqsave(&psig->siglock, flags);
1675 if (!tsk->ptrace && sig == SIGCHLD &&
1676 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1677 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1678
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693 autoreap = true;
1694 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1695 sig = 0;
1696 }
1697 if (valid_signal(sig) && sig)
1698 __group_send_sig_info(sig, &info, tsk->parent);
1699 __wake_up_parent(tsk, tsk->parent);
1700 spin_unlock_irqrestore(&psig->siglock, flags);
1701
1702 return autoreap;
1703}
1704
1705
1706
1707
1708
1709
1710
1711
1712
1713
1714
1715
1716
1717
1718static void do_notify_parent_cldstop(struct task_struct *tsk,
1719 bool for_ptracer, int why)
1720{
1721 struct siginfo info;
1722 unsigned long flags;
1723 struct task_struct *parent;
1724 struct sighand_struct *sighand;
1725 cputime_t utime, stime;
1726
1727 if (for_ptracer) {
1728 parent = tsk->parent;
1729 } else {
1730 tsk = tsk->group_leader;
1731 parent = tsk->real_parent;
1732 }
1733
1734 info.si_signo = SIGCHLD;
1735 info.si_errno = 0;
1736
1737
1738
1739 rcu_read_lock();
1740 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
1741 info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
1742 rcu_read_unlock();
1743
1744 task_cputime(tsk, &utime, &stime);
1745 info.si_utime = cputime_to_clock_t(utime);
1746 info.si_stime = cputime_to_clock_t(stime);
1747
1748 info.si_code = why;
1749 switch (why) {
1750 case CLD_CONTINUED:
1751 info.si_status = SIGCONT;
1752 break;
1753 case CLD_STOPPED:
1754 info.si_status = tsk->signal->group_exit_code & 0x7f;
1755 break;
1756 case CLD_TRAPPED:
1757 info.si_status = tsk->exit_code & 0x7f;
1758 break;
1759 default:
1760 BUG();
1761 }
1762
1763 sighand = parent->sighand;
1764 spin_lock_irqsave(&sighand->siglock, flags);
1765 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1766 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1767 __group_send_sig_info(SIGCHLD, &info, parent);
1768
1769
1770
1771 __wake_up_parent(tsk, parent);
1772 spin_unlock_irqrestore(&sighand->siglock, flags);
1773}
1774
1775static inline int may_ptrace_stop(void)
1776{
1777 if (!likely(current->ptrace))
1778 return 0;
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792 if (unlikely(current->mm->core_state) &&
1793 unlikely(current->mm == current->parent->mm))
1794 return 0;
1795
1796 return 1;
1797}
1798
1799
1800
1801
1802
1803static int sigkill_pending(struct task_struct *tsk)
1804{
1805 return sigismember(&tsk->pending.signal, SIGKILL) ||
1806 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1807}
1808
1809
1810
1811
1812
1813
1814
1815
1816
1817
1818
1819
1820static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
1823{
1824 bool gstop_done = false;
1825
1826 if (arch_ptrace_stop_needed(exit_code, info)) {
1827
1828
1829
1830
1831
1832
1833
1834
1835
1836
1837
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
1841 if (sigkill_pending(current))
1842 return;
1843 }
1844
1845
1846
1847
1848
1849
1850
1851
1852 set_current_state(TASK_TRACED);
1853
1854 current->last_siginfo = info;
1855 current->exit_code = exit_code;
1856
1857
1858
1859
1860
1861
1862
1863
1864 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
1865 gstop_done = task_participate_group_stop(current);
1866
1867
1868 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
1869 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
1870 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
1871
1872
1873 task_clear_jobctl_trapping(current);
1874
	spin_unlock_irq(&current->sighand->siglock);
1876 read_lock(&tasklist_lock);
1877 if (may_ptrace_stop()) {
1878
1879
1880
1881
1882
1883
1884
1885
1886
1887
1888 do_notify_parent_cldstop(current, true, why);
1889 if (gstop_done && ptrace_reparented(current))
1890 do_notify_parent_cldstop(current, false, why);
1891
1892
1893
1894
1895
1896
1897
1898 preempt_disable();
1899 read_unlock(&tasklist_lock);
1900 preempt_enable_no_resched();
1901 freezable_schedule();
1902 } else {
1903
1904
1905
1906
1907
1908
1909
1910
1911
1912
1913 if (gstop_done)
1914 do_notify_parent_cldstop(current, false, why);
1915
1916
1917 __set_current_state(TASK_RUNNING);
1918 if (clear_code)
1919 current->exit_code = 0;
1920 read_unlock(&tasklist_lock);
1921 }
1922
1923
1924
1925
1926
1927
	spin_lock_irq(&current->sighand->siglock);
1929 current->last_siginfo = NULL;
1930
1931
1932 current->jobctl &= ~JOBCTL_LISTENING;
1933
1934
1935
1936
1937
1938
1939 recalc_sigpending_tsk(current);
1940}
1941
1942static void ptrace_do_notify(int signr, int exit_code, int why)
1943{
1944 siginfo_t info;
1945
1946 memset(&info, 0, sizeof info);
1947 info.si_signo = signr;
1948 info.si_code = exit_code;
1949 info.si_pid = task_pid_vnr(current);
1950 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
1951
1952
1953 ptrace_stop(exit_code, why, 1, &info);
1954}
1955
1956void ptrace_notify(int exit_code)
1957{
1958 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1959 if (unlikely(current->task_works))
1960 task_work_run();
1961
	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
1965}
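/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it.  If already set, participate in the existing
 * group stop.  If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself.  Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */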
1989static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
1991{
1992 struct signal_struct *sig = current->signal;
1993
1994 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
1995 unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
1996 struct task_struct *t;
1997
1998
1999 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2000
2001 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2002 unlikely(signal_group_exit(sig)))
2003 return false;
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2024 sig->group_exit_code = signr;
2025
2026 sig->group_stop_count = 0;
2027
2028 if (task_set_jobctl_pending(current, signr | gstop))
2029 sig->group_stop_count++;
2030
2031 t = current;
2032 while_each_thread(current, t) {
2033
2034
2035
2036
2037
2038 if (!task_is_stopped(t) &&
2039 task_set_jobctl_pending(t, signr | gstop)) {
2040 sig->group_stop_count++;
2041 if (likely(!(t->ptrace & PT_SEIZED)))
2042 signal_wake_up(t, 0);
2043 else
2044 ptrace_trap_notify(t);
2045 }
2046 }
2047 }
2048
2049 if (likely(!current->ptrace)) {
2050 int notify = 0;
2051
2052
2053
2054
2055
2056
2057 if (task_participate_group_stop(current))
2058 notify = CLD_STOPPED;
2059
2060 __set_current_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);
2062
2063
2064
2065
2066
2067
2068
2069
2070
2071
2072 if (notify) {
2073 read_lock(&tasklist_lock);
2074 do_notify_parent_cldstop(current, false, notify);
2075 read_unlock(&tasklist_lock);
2076 }
2077
2078
2079 freezable_schedule();
2080 return true;
2081 } else {
2082
2083
2084
2085
2086 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2087 return false;
2088 }
2089}
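/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, the trap is reported as a PTRACE_EVENT_STOP stop with
 * the stop signal (or SIGTRAP if no group stop is in effect) in the lower
 * bits of the exit code.  When !PT_SEIZED, it is used only for the group
 * stop trap, with the stop signal as exit code and no siginfo.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */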
2106static void do_jobctl_trap(void)
2107{
2108 struct signal_struct *signal = current->signal;
2109 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2110
2111 if (current->ptrace & PT_SEIZED) {
2112 if (!signal->group_stop_count &&
2113 !(signal->flags & SIGNAL_STOP_STOPPED))
2114 signr = SIGTRAP;
2115 WARN_ON_ONCE(!signr);
2116 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2117 CLD_STOPPED);
2118 } else {
2119 WARN_ON_ONCE(!signr);
2120 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2121 current->exit_code = 0;
2122 }
2123}
2124
2125static int ptrace_signal(int signr, siginfo_t *info)
2126{
2127 ptrace_signal_deliver();
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2138 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2139
2140
2141 signr = current->exit_code;
2142 if (signr == 0)
2143 return signr;
2144
2145 current->exit_code = 0;
2146
2147
2148
2149
2150
2151
2152
2153 if (signr != info->si_signo) {
2154 info->si_signo = signr;
2155 info->si_errno = 0;
2156 info->si_code = SI_USER;
2157 rcu_read_lock();
2158 info->si_pid = task_pid_vnr(current->parent);
2159 info->si_uid = from_kuid_munged(current_user_ns(),
2160 task_uid(current->parent));
2161 rcu_read_unlock();
2162 }
2163
2164
	if (sigismember(&current->blocked, signr)) {
2166 specific_send_sig_info(signr, info, current);
2167 signr = 0;
2168 }
2169
2170 return signr;
2171}
2172
2173int get_signal(struct ksignal *ksig)
2174{
2175 struct sighand_struct *sighand = current->sighand;
2176 struct signal_struct *signal = current->signal;
2177 int signr;
2178
2179 if (unlikely(current->task_works))
2180 task_work_run();
2181
2182 if (unlikely(uprobe_deny_signal()))
2183 return 0;
2184
2185
2186
2187
2188
2189
2190 try_to_freeze();
2191
2192relock:
2193 spin_lock_irq(&sighand->siglock);
2194
2195
2196
2197
2198
2199 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2200 int why;
2201
2202 if (signal->flags & SIGNAL_CLD_CONTINUED)
2203 why = CLD_CONTINUED;
2204 else
2205 why = CLD_STOPPED;
2206
2207 signal->flags &= ~SIGNAL_CLD_MASK;
2208
2209 spin_unlock_irq(&sighand->siglock);
2210
2211
2212
2213
2214
2215
2216
2217
2218
2219 read_lock(&tasklist_lock);
2220 do_notify_parent_cldstop(current, false, why);
2221
2222 if (ptrace_reparented(current->group_leader))
2223 do_notify_parent_cldstop(current->group_leader,
2224 true, why);
2225 read_unlock(&tasklist_lock);
2226
2227 goto relock;
2228 }
2229
2230 for (;;) {
2231 struct k_sigaction *ka;
2232
2233 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2234 do_signal_stop(0))
2235 goto relock;
2236
2237 if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
2238 do_jobctl_trap();
2239 spin_unlock_irq(&sighand->siglock);
2240 goto relock;
2241 }
2242
		signr = dequeue_signal(current, &current->blocked, &ksig->info);
2244
2245 if (!signr)
2246 break;
2247
2248 if (unlikely(current->ptrace) && signr != SIGKILL) {
2249 signr = ptrace_signal(signr, &ksig->info);
2250 if (!signr)
2251 continue;
2252 }
2253
2254 ka = &sighand->action[signr-1];
2255
2256
2257 trace_signal_deliver(signr, &ksig->info, ka);
2258
2259 if (ka->sa.sa_handler == SIG_IGN)
2260 continue;
2261 if (ka->sa.sa_handler != SIG_DFL) {
2262
2263 ksig->ka = *ka;
2264
2265 if (ka->sa.sa_flags & SA_ONESHOT)
2266 ka->sa.sa_handler = SIG_DFL;
2267
2268 break;
2269 }
2270
2271
2272
2273
2274 if (sig_kernel_ignore(signr))
2275 continue;
2276
2277
2278
2279
2280
2281
2282
2283
2284
2285
2286
2287 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2288 !sig_kernel_only(signr))
2289 continue;
2290
2291 if (sig_kernel_stop(signr)) {
2292
2293
2294
2295
2296
2297
2298
2299
2300
2301
2302 if (signr != SIGSTOP) {
2303 spin_unlock_irq(&sighand->siglock);
2304
2305
2306
2307 if (is_current_pgrp_orphaned())
2308 goto relock;
2309
2310 spin_lock_irq(&sighand->siglock);
2311 }
2312
2313 if (likely(do_signal_stop(ksig->info.si_signo))) {
2314
2315 goto relock;
2316 }
2317
2318
2319
2320
2321
2322 continue;
2323 }
2324
2325 spin_unlock_irq(&sighand->siglock);
2326
2327
2328
2329
2330 current->flags |= PF_SIGNALED;
2331
2332 if (sig_kernel_coredump(signr)) {
2333 if (print_fatal_signals)
2334 print_fatal_signal(ksig->info.si_signo);
2335 proc_coredump_connector(current);
2336
2337
2338
2339
2340
2341
2342
2343
2344 do_coredump(&ksig->info);
2345 }
2346
2347
2348
2349
2350 do_group_exit(ksig->info.si_signo);
2351
2352 }
2353 spin_unlock_irq(&sighand->siglock);
2354
2355 ksig->sig = signr;
2356 return ksig->sig > 0;
2357}
2358
2359
2360
2361
2362
2363
2364
2365
2366
2367
2368
2369static void signal_delivered(struct ksignal *ksig, int stepping)
2370{
2371 sigset_t blocked;
2372
2373
2374
2375
2376
2377 clear_restore_sigmask();
2378
	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2380 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2381 sigaddset(&blocked, ksig->sig);
2382 set_current_blocked(&blocked);
2383 tracehook_signal_handler(stepping);
2384}
2385
2386void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2387{
2388 if (failed)
2389 force_sigsegv(ksig->sig, current);
2390 else
2391 signal_delivered(ksig, stepping);
2392}
2393
2394
2395
2396
2397
2398
2399static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2400{
2401 sigset_t retarget;
2402 struct task_struct *t;
2403
2404 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2405 if (sigisemptyset(&retarget))
2406 return;
2407
2408 t = tsk;
2409 while_each_thread(tsk, t) {
2410 if (t->flags & PF_EXITING)
2411 continue;
2412
2413 if (!has_pending_signals(&retarget, &t->blocked))
2414 continue;
2415
2416 sigandsets(&retarget, &retarget, &t->blocked);
2417
2418 if (!signal_pending(t))
2419 signal_wake_up(t, 0);
2420
2421 if (sigisemptyset(&retarget))
2422 break;
2423 }
2424}
2425
2426void exit_signals(struct task_struct *tsk)
2427{
2428 int group_stop = 0;
2429 sigset_t unblocked;
2430
2431
2432
2433
2434
2435 threadgroup_change_begin(tsk);
2436
2437 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2438 tsk->flags |= PF_EXITING;
2439 threadgroup_change_end(tsk);
2440 return;
2441 }
2442
2443 spin_lock_irq(&tsk->sighand->siglock);
2444
2445
2446
2447
2448 tsk->flags |= PF_EXITING;
2449
2450 threadgroup_change_end(tsk);
2451
2452 if (!signal_pending(tsk))
2453 goto out;
2454
2455 unblocked = tsk->blocked;
2456 signotset(&unblocked);
2457 retarget_shared_pending(tsk, &unblocked);
2458
2459 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2460 task_participate_group_stop(tsk))
2461 group_stop = CLD_STOPPED;
2462out:
2463 spin_unlock_irq(&tsk->sighand->siglock);
2464
2465
2466
2467
2468
2469 if (unlikely(group_stop)) {
2470 read_lock(&tasklist_lock);
2471 do_notify_parent_cldstop(tsk, false, group_stop);
2472 read_unlock(&tasklist_lock);
2473 }
2474}
2475
2476EXPORT_SYMBOL(recalc_sigpending);
2477EXPORT_SYMBOL_GPL(dequeue_signal);
2478EXPORT_SYMBOL(flush_signals);
2479EXPORT_SYMBOL(force_sig);
2480EXPORT_SYMBOL(send_sig);
2481EXPORT_SYMBOL(send_sig_info);
2482EXPORT_SYMBOL(sigprocmask);
2483EXPORT_SYMBOL(block_all_signals);
2484EXPORT_SYMBOL(unblock_all_signals);
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494SYSCALL_DEFINE0(restart_syscall)
2495{
	struct restart_block *restart = &current_thread_info()->restart_block;
2497 return restart->fn(restart);
2498}
2499
2500long do_no_restart_syscall(struct restart_block *param)
2501{
2502 return -EINTR;
2503}
2504
2505static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2506{
2507 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2508 sigset_t newblocked;
2509
		sigandnsets(&newblocked, newset, &current->blocked);
2511 retarget_shared_pending(tsk, &newblocked);
2512 }
2513 tsk->blocked = *newset;
2514 recalc_sigpending();
2515}
2516
2517
2518
2519
2520
2521
2522
2523
2524void set_current_blocked(sigset_t *newset)
2525{
2526 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2527 __set_current_blocked(newset);
2528}
2529
2530void __set_current_blocked(const sigset_t *newset)
2531{
2532 struct task_struct *tsk = current;
2533
2534 spin_lock_irq(&tsk->sighand->siglock);
2535 __set_task_blocked(tsk, newset);
2536 spin_unlock_irq(&tsk->sighand->siglock);
2537}
2538
2539
2540
2541
2542
2543
2544
2545
2546
2547int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2548{
2549 struct task_struct *tsk = current;
2550 sigset_t newset;
2551
2552
2553 if (oldset)
2554 *oldset = tsk->blocked;
2555
2556 switch (how) {
2557 case SIG_BLOCK:
2558 sigorsets(&newset, &tsk->blocked, set);
2559 break;
2560 case SIG_UNBLOCK:
2561 sigandnsets(&newset, &tsk->blocked, set);
2562 break;
2563 case SIG_SETMASK:
2564 newset = *set;
2565 break;
2566 default:
2567 return -EINVAL;
2568 }
2569
2570 __set_current_blocked(&newset);
2571 return 0;
2572}
2573
2574
2575
2576
2577
2578
2579
2580
2581SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2582 sigset_t __user *, oset, size_t, sigsetsize)
2583{
2584 sigset_t old_set, new_set;
2585 int error;
2586
2587
2588 if (sigsetsize != sizeof(sigset_t))
2589 return -EINVAL;
2590
2591 old_set = current->blocked;
2592
2593 if (nset) {
2594 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2595 return -EFAULT;
2596 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2597
2598 error = sigprocmask(how, &new_set, NULL);
2599 if (error)
2600 return error;
2601 }
2602
2603 if (oset) {
2604 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2605 return -EFAULT;
2606 }
2607
2608 return 0;
2609}
2610
2611#ifdef CONFIG_COMPAT
2612COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
2613 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
2614{
2615#ifdef __BIG_ENDIAN
2616 sigset_t old_set = current->blocked;
2617
2618
2619 if (sigsetsize != sizeof(sigset_t))
2620 return -EINVAL;
2621
2622 if (nset) {
2623 compat_sigset_t new32;
2624 sigset_t new_set;
2625 int error;
2626 if (copy_from_user(&new32, nset, sizeof(compat_sigset_t)))
2627 return -EFAULT;
2628
2629 sigset_from_compat(&new_set, &new32);
2630 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2631
2632 error = sigprocmask(how, &new_set, NULL);
2633 if (error)
2634 return error;
2635 }
2636 if (oset) {
2637 compat_sigset_t old32;
2638 sigset_to_compat(&old32, &old_set);
2639 if (copy_to_user(oset, &old32, sizeof(compat_sigset_t)))
2640 return -EFAULT;
2641 }
2642 return 0;
2643#else
2644 return sys_rt_sigprocmask(how, (sigset_t __user *)nset,
2645 (sigset_t __user *)oset, sigsetsize);
2646#endif
2647}
2648#endif
2649
2650static int do_sigpending(void *set, unsigned long sigsetsize)
2651{
2652 if (sigsetsize > sizeof(sigset_t))
2653 return -EINVAL;
2654
	spin_lock_irq(&current->sighand->siglock);
	sigorsets(set, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);
2659
2660
	sigandsets(set, &current->blocked, set);
2662 return 0;
2663}
2664
2665
2666
2667
2668
2669
2670
2671SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
2672{
2673 sigset_t set;
2674 int err = do_sigpending(&set, sigsetsize);
2675 if (!err && copy_to_user(uset, &set, sigsetsize))
2676 err = -EFAULT;
2677 return err;
2678}
2679
2680#ifdef CONFIG_COMPAT
2681COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
2682 compat_size_t, sigsetsize)
2683{
2684#ifdef __BIG_ENDIAN
2685 sigset_t set;
2686 int err = do_sigpending(&set, sigsetsize);
2687 if (!err) {
2688 compat_sigset_t set32;
2689 sigset_to_compat(&set32, &set);
2690
2691 if (copy_to_user(uset, &set32, sigsetsize))
2692 err = -EFAULT;
2693 }
2694 return err;
2695#else
2696 return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize);
2697#endif
2698}
2699#endif
2700
2701#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2702
2703int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
2704{
2705 int err;
2706
2707 if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2708 return -EFAULT;
2709 if (from->si_code < 0)
2710 return __copy_to_user(to, from, sizeof(siginfo_t))
2711 ? -EFAULT : 0;
2712
2713
2714
2715
2716
2717
2718
2719
2720
2721 err = __put_user(from->si_signo, &to->si_signo);
2722 err |= __put_user(from->si_errno, &to->si_errno);
2723 err |= __put_user((short)from->si_code, &to->si_code);
2724 switch (from->si_code & __SI_MASK) {
2725 case __SI_KILL:
2726 err |= __put_user(from->si_pid, &to->si_pid);
2727 err |= __put_user(from->si_uid, &to->si_uid);
2728 break;
2729 case __SI_TIMER:
2730 err |= __put_user(from->si_tid, &to->si_tid);
2731 err |= __put_user(from->si_overrun, &to->si_overrun);
2732 err |= __put_user(from->si_ptr, &to->si_ptr);
2733 break;
2734 case __SI_POLL:
2735 err |= __put_user(from->si_band, &to->si_band);
2736 err |= __put_user(from->si_fd, &to->si_fd);
2737 break;
2738 case __SI_FAULT:
2739 err |= __put_user(from->si_addr, &to->si_addr);
2740#ifdef __ARCH_SI_TRAPNO
2741 err |= __put_user(from->si_trapno, &to->si_trapno);
2742#endif
2743#ifdef BUS_MCEERR_AO
2744
2745
2746
2747
2748 if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
2749 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2750#endif
2751 break;
2752 case __SI_CHLD:
2753 err |= __put_user(from->si_pid, &to->si_pid);
2754 err |= __put_user(from->si_uid, &to->si_uid);
2755 err |= __put_user(from->si_status, &to->si_status);
2756 err |= __put_user(from->si_utime, &to->si_utime);
2757 err |= __put_user(from->si_stime, &to->si_stime);
2758 break;
2759 case __SI_RT:
2760 case __SI_MESGQ:
2761 err |= __put_user(from->si_pid, &to->si_pid);
2762 err |= __put_user(from->si_uid, &to->si_uid);
2763 err |= __put_user(from->si_ptr, &to->si_ptr);
2764 break;
2765#ifdef __ARCH_SIGSYS
2766 case __SI_SYS:
2767 err |= __put_user(from->si_call_addr, &to->si_call_addr);
2768 err |= __put_user(from->si_syscall, &to->si_syscall);
2769 err |= __put_user(from->si_arch, &to->si_arch);
2770 break;
2771#endif
2772 default:
2773 err |= __put_user(from->si_pid, &to->si_pid);
2774 err |= __put_user(from->si_uid, &to->si_uid);
2775 break;
2776 }
2777 return err;
2778}
2779
2780#endif
2781
2782
2783
2784
2785
2786
2787
2788int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
2789 const struct timespec *ts)
2790{
2791 struct task_struct *tsk = current;
2792 long timeout = MAX_SCHEDULE_TIMEOUT;
2793 sigset_t mask = *which;
2794 int sig;
2795
2796 if (ts) {
2797 if (!timespec_valid(ts))
2798 return -EINVAL;
2799 timeout = timespec_to_jiffies(ts);
2800
2801
2802
2803
2804 if (ts->tv_sec || ts->tv_nsec)
2805 timeout++;
2806 }
2807
2808
2809
2810
2811 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
2812 signotset(&mask);
2813
2814 spin_lock_irq(&tsk->sighand->siglock);
2815 sig = dequeue_signal(tsk, &mask, info);
2816 if (!sig && timeout) {
2817
2818
2819
2820
2821
2822
2823 tsk->real_blocked = tsk->blocked;
2824 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
2825 recalc_sigpending();
2826 spin_unlock_irq(&tsk->sighand->siglock);
2827
2828 timeout = freezable_schedule_timeout_interruptible(timeout);
2829
2830 spin_lock_irq(&tsk->sighand->siglock);
2831 __set_task_blocked(tsk, &tsk->real_blocked);
2832 sigemptyset(&tsk->real_blocked);
2833 sig = dequeue_signal(tsk, &mask, info);
2834 }
2835 spin_unlock_irq(&tsk->sighand->siglock);
2836
2837 if (sig)
2838 return sig;
2839 return timeout ? -EINTR : -EAGAIN;
2840}
2841
2842
2843
2844
2845
2846
2847
2848
2849
2850SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2851 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2852 size_t, sigsetsize)
2853{
2854 sigset_t these;
2855 struct timespec ts;
2856 siginfo_t info;
2857 int ret;
2858
2859
2860 if (sigsetsize != sizeof(sigset_t))
2861 return -EINVAL;
2862
2863 if (copy_from_user(&these, uthese, sizeof(these)))
2864 return -EFAULT;
2865
2866 if (uts) {
2867 if (copy_from_user(&ts, uts, sizeof(ts)))
2868 return -EFAULT;
2869 }
2870
2871 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
2872
2873 if (ret > 0 && uinfo) {
2874 if (copy_siginfo_to_user(uinfo, &info))
2875 ret = -EFAULT;
2876 }
2877
2878 return ret;
2879}
2880
2881
2882
2883
2884
2885
2886SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2887{
2888 struct siginfo info;
2889
2890 info.si_signo = sig;
2891 info.si_errno = 0;
2892 info.si_code = SI_USER;
2893 info.si_pid = task_tgid_vnr(current);
2894 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2895
2896 return kill_something_info(sig, &info, pid);
2897}
2898
2899static int
2900do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2901{
2902 struct task_struct *p;
2903 int error = -ESRCH;
2904
2905 rcu_read_lock();
2906 p = find_task_by_vpid(pid);
2907 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2908 error = check_kill_permission(sig, info, p);
2909
2910
2911
2912
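 /*
  * A null signal is a permission and process-existence probe:
  * nothing is actually delivered.
  */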
2913 if (!error && sig) {
2914 error = do_send_sig_info(sig, info, p, false);
2915
2916
2917
2918
2919
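 /*
  * do_send_sig_info() can fail with -ESRCH if the target is already
  * exiting; report success, as if the signal was delivered just
  * before the task died.
  */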
2920 if (unlikely(error == -ESRCH))
2921 error = 0;
2922 }
2923 }
2924 rcu_read_unlock();
2925
2926 return error;
2927}
2928
2929static int do_tkill(pid_t tgid, pid_t pid, int sig)
2930{
2931 struct siginfo info = {};
2932
2933 info.si_signo = sig;
2934 info.si_errno = 0;
2935 info.si_code = SI_TKILL;
2936 info.si_pid = task_tgid_vnr(current);
2937 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2938
2939 return do_send_specific(tgid, pid, sig, &info);
2940}
2941
2942
2943
2944
2945
2946
2947
2948
2949
2950
2951
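/**
 * sys_tgkill - send a signal to one specific thread
 * @tgid: the thread group ID of the thread
 * @pid: the PID of the thread
 * @sig: signal to be sent
 *
 * The @tgid is checked as well, so -ESRCH is returned even if the PID
 * exists but no longer belongs to the target thread group; this keeps
 * the signal from reaching an unrelated, recycled PID.
 */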
2952SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
2953{
2954
2955 if (pid <= 0 || tgid <= 0)
2956 return -EINVAL;
2957
2958 return do_tkill(tgid, pid, sig);
2959}
2960
2961
2962
2963
2964
2965
2966
2967
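/**
 * sys_tkill - send a signal to one specific task
 * @pid: the PID of the task
 * @sig: signal to be sent
 *
 * Sends a signal to a single task, even if the PID belongs to a
 * thread group.
 */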
2968SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
2969{
2970
2971 if (pid <= 0)
2972 return -EINVAL;
2973
2974 return do_tkill(0, pid, sig);
2975}
2976
2977static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
2978{
2979
2980
2981
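 /*
  * Not even root may forge siginfo that looks as if it came from the
  * kernel, or impersonate kill()/tgkill() (which add source info),
  * when signalling another process.
  */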
2982 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
2983 (task_pid_vnr(current) != pid)) {
2984
2985 WARN_ON_ONCE(info->si_code < 0);
2986 return -EPERM;
2987 }
2988 info->si_signo = sig;
2989
2990
2991 return kill_proc_info(sig, info, pid);
2992}
2993
2994
2995
2996
2997
2998
2999
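/**
 * sys_rt_sigqueueinfo - queue a signal and accompanying siginfo to a process
 * @pid: the PID of the target process
 * @sig: signal to be sent
 * @uinfo: siginfo to be sent along with the signal
 */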
3000SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3001 siginfo_t __user *, uinfo)
3002{
3003 siginfo_t info;
3004 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3005 return -EFAULT;
3006 return do_rt_sigqueueinfo(pid, sig, &info);
3007}
3008
3009#ifdef CONFIG_COMPAT
3010COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3011 compat_pid_t, pid,
3012 int, sig,
3013 struct compat_siginfo __user *, uinfo)
3014{
3015 siginfo_t info;
3016 int ret = copy_siginfo_from_user32(&info, uinfo);
3017 if (unlikely(ret))
3018 return ret;
3019 return do_rt_sigqueueinfo(pid, sig, &info);
3020}
3021#endif
3022
3023static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
3024{
3025
3026 if (pid <= 0 || tgid <= 0)
3027 return -EINVAL;
3028
3029
3030
3031
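 /*
  * As with rt_sigqueueinfo: kernel-looking or kill()-style si_codes
  * may only be used when signalling the calling thread itself.
  */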
3032 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3033 (task_pid_vnr(current) != pid)) {
3034
3035 WARN_ON_ONCE(info->si_code < 0);
3036 return -EPERM;
3037 }
3038 info->si_signo = sig;
3039
3040 return do_send_specific(tgid, pid, sig, info);
3041}
3042
3043SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3044 siginfo_t __user *, uinfo)
3045{
3046 siginfo_t info;
3047
3048 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3049 return -EFAULT;
3050
3051 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3052}
3053
3054#ifdef CONFIG_COMPAT
3055COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3056 compat_pid_t, tgid,
3057 compat_pid_t, pid,
3058 int, sig,
3059 struct compat_siginfo __user *, uinfo)
3060{
3061 siginfo_t info;
3062
3063 if (copy_siginfo_from_user32(&info, uinfo))
3064 return -EFAULT;
3065 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3066}
3067#endif
3068
3069
3070
3071
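/*
 * Set a signal disposition for the current task; used by kernel threads.
 * When the action is SIG_IGN, any already-queued instances of @sig are
 * flushed from both the shared and the private pending sets.
 */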
3072void kernel_sigaction(int sig, __sighandler_t action)
3073{
3074 spin_lock_irq(&current->sighand->siglock);
3075 current->sighand->action[sig - 1].sa.sa_handler = action;
3076 if (action == SIG_IGN) {
3077 sigset_t mask;
3078
3079 sigemptyset(&mask);
3080 sigaddset(&mask, sig);
3081
3082 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3083 flush_sigqueue_mask(&mask, &current->pending);
3084 recalc_sigpending();
3085 }
3086 spin_unlock_irq(&current->sighand->siglock);
3087}
3088EXPORT_SYMBOL(kernel_sigaction);
3089
3090int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3091{
3092 struct task_struct *p = current, *t;
3093 struct k_sigaction *k;
3094 sigset_t mask;
3095
3096 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3097 return -EINVAL;
3098
3099 k = &p->sighand->action[sig-1];
3100
3101 spin_lock_irq(&p->sighand->siglock);
3102 if (oact)
3103 *oact = *k;
3104
3105 if (act) {
3106 sigdelsetmask(&act->sa.sa_mask,
3107 sigmask(SIGKILL) | sigmask(SIGSTOP));
3108 *k = *act;
3109
3110
3111
3112
3113
3114
3115
3116
3117
3118
3119
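 /*
  * Per POSIX (3.3.1.3), setting the action to SIG_IGN, or to SIG_DFL
  * for a signal whose default is to ignore it (e.g. SIGCHLD), discards
  * any pending instance of that signal, whether or not it is blocked.
  */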
3120 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3121 sigemptyset(&mask);
3122 sigaddset(&mask, sig);
3123 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
3124 for_each_thread(p, t)
3125 flush_sigqueue_mask(&mask, &t->pending);
3126 }
3127 }
3128
3129 spin_unlock_irq(&p->sighand->siglock);
3130 return 0;
3131}
3132
3133static int
3134do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
3135{
3136 stack_t oss;
3137 int error;
3138
3139 oss.ss_sp = (void __user *) current->sas_ss_sp;
3140 oss.ss_size = current->sas_ss_size;
3141 oss.ss_flags = sas_ss_flags(sp);
3142
3143 if (uss) {
3144 void __user *ss_sp;
3145 size_t ss_size;
3146 int ss_flags;
3147
3148 error = -EFAULT;
3149 if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
3150 goto out;
3151 error = __get_user(ss_sp, &uss->ss_sp) |
3152 __get_user(ss_flags, &uss->ss_flags) |
3153 __get_user(ss_size, &uss->ss_size);
3154 if (error)
3155 goto out;
3156
3157 error = -EPERM;
3158 if (on_sig_stack(sp))
3159 goto out;
3160
3161 error = -EINVAL;
3162
3163
3164
3165
3166
3167
3168
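 /*
  * Accept SS_DISABLE, SS_ONSTACK or 0 here. Historically a flags
  * value of 0 was used to mean SS_ONSTACK, so rejecting it would
  * break old userspace.
  */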
3169 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
3170 goto out;
3171
3172 if (ss_flags == SS_DISABLE) {
3173 ss_size = 0;
3174 ss_sp = NULL;
3175 } else {
3176 error = -ENOMEM;
3177 if (ss_size < MINSIGSTKSZ)
3178 goto out;
3179 }
3180
3181 current->sas_ss_sp = (unsigned long) ss_sp;
3182 current->sas_ss_size = ss_size;
3183 }
3184
3185 error = 0;
3186 if (uoss) {
3187 error = -EFAULT;
3188 if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
3189 goto out;
3190 error = __put_user(oss.ss_sp, &uoss->ss_sp) |
3191 __put_user(oss.ss_size, &uoss->ss_size) |
3192 __put_user(oss.ss_flags, &uoss->ss_flags);
3193 }
3194
3195out:
3196 return error;
3197}
3198SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
3199{
3200 return do_sigaltstack(uss, uoss, current_user_stack_pointer());
3201}
3202
3203int restore_altstack(const stack_t __user *uss)
3204{
3205 int err = do_sigaltstack(uss, NULL, current_user_stack_pointer());
3206
3207 return err == -EFAULT ? err : 0;
3208}
3209
3210int __save_altstack(stack_t __user *uss, unsigned long sp)
3211{
3212 struct task_struct *t = current;
3213 return __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
3214 __put_user(sas_ss_flags(sp), &uss->ss_flags) |
3215 __put_user(t->sas_ss_size, &uss->ss_size);
3216}
3217
3218#ifdef CONFIG_COMPAT
3219COMPAT_SYSCALL_DEFINE2(sigaltstack,
3220 const compat_stack_t __user *, uss_ptr,
3221 compat_stack_t __user *, uoss_ptr)
3222{
3223 stack_t uss, uoss;
3224 int ret;
3225 mm_segment_t seg;
3226
3227 if (uss_ptr) {
3228 compat_stack_t uss32;
3229
3230 memset(&uss, 0, sizeof(stack_t));
3231 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
3232 return -EFAULT;
3233 uss.ss_sp = compat_ptr(uss32.ss_sp);
3234 uss.ss_flags = uss32.ss_flags;
3235 uss.ss_size = uss32.ss_size;
3236 }
3237 seg = get_fs();
3238 set_fs(KERNEL_DS);
3239 ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
3240 (stack_t __force __user *) &uoss,
3241 compat_user_stack_pointer());
3242 set_fs(seg);
3243 if (ret >= 0 && uoss_ptr) {
3244 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
3245 __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
3246 __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
3247 __put_user(uoss.ss_size, &uoss_ptr->ss_size))
3248 ret = -EFAULT;
3249 }
3250 return ret;
3251}
3252
3253int compat_restore_altstack(const compat_stack_t __user *uss)
3254{
3255 int err = compat_sys_sigaltstack(uss, NULL);
3256
3257 return err == -EFAULT ? err : 0;
3258}
3259
3260int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
3261{
3262 struct task_struct *t = current;
3263 return __put_user(ptr_to_compat((void __user *)t->sas_ss_sp), &uss->ss_sp) |
3264 __put_user(sas_ss_flags(sp), &uss->ss_flags) |
3265 __put_user(t->sas_ss_size, &uss->ss_size);
3266}
3267#endif
3268
3269#ifdef __ARCH_WANT_SYS_SIGPENDING
3270
3271
3272
3273
3274
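/**
 * sys_sigpending - examine pending signals
 * @set: where the set of pending signals is returned (old sigset format)
 */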
3275SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
3276{
3277 return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t));
3278}
3279
3280#endif
3281
3282#ifdef __ARCH_WANT_SYS_SIGPROCMASK
3283
3284
3285
3286
3287
3288
3289
3290
3291
3292
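/**
 * sys_sigprocmask - examine and change blocked signals (old interface)
 * @how: whether to add, remove, or set signals
 * @nset: signals to add or remove (if non-null)
 * @oset: previous value of the signal mask, if non-null
 *
 * Only the first word of the signal mask is manipulated here; the
 * full-sized interface is sys_rt_sigprocmask.
 */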
3293SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
3294 old_sigset_t __user *, oset)
3295{
3296 old_sigset_t old_set, new_set;
3297 sigset_t new_blocked;
3298
3299 old_set = current->blocked.sig[0];
3300
3301 if (nset) {
3302 if (copy_from_user(&new_set, nset, sizeof(*nset)))
3303 return -EFAULT;
3304
3305 new_blocked = current->blocked;
3306
3307 switch (how) {
3308 case SIG_BLOCK:
3309 sigaddsetmask(&new_blocked, new_set);
3310 break;
3311 case SIG_UNBLOCK:
3312 sigdelsetmask(&new_blocked, new_set);
3313 break;
3314 case SIG_SETMASK:
3315 new_blocked.sig[0] = new_set;
3316 break;
3317 default:
3318 return -EINVAL;
3319 }
3320
3321 set_current_blocked(&new_blocked);
3322 }
3323
3324 if (oset) {
3325 if (copy_to_user(oset, &old_set, sizeof(*oset)))
3326 return -EFAULT;
3327 }
3328
3329 return 0;
3330}
3331#endif
3332
3333#ifndef CONFIG_ODD_RT_SIGACTION
3334
3335
3336
3337
3338
3339
3340
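/**
 * sys_rt_sigaction - alter an action taken by a process
 * @sig: signal whose disposition is examined or changed
 * @act: new sigaction, or NULL to only query
 * @oact: if non-null, the previous sigaction is returned here
 * @sigsetsize: size of sigset_t type
 */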
3341SYSCALL_DEFINE4(rt_sigaction, int, sig,
3342 const struct sigaction __user *, act,
3343 struct sigaction __user *, oact,
3344 size_t, sigsetsize)
3345{
3346 struct k_sigaction new_sa, old_sa;
3347 int ret = -EINVAL;
3348
3349
3350 if (sigsetsize != sizeof(sigset_t))
3351 goto out;
3352
3353 if (act) {
3354 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3355 return -EFAULT;
3356 }
3357
3358 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3359
3360 if (!ret && oact) {
3361 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3362 return -EFAULT;
3363 }
3364out:
3365 return ret;
3366}
3367#ifdef CONFIG_COMPAT
3368COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
3369 const struct compat_sigaction __user *, act,
3370 struct compat_sigaction __user *, oact,
3371 compat_size_t, sigsetsize)
3372{
3373 struct k_sigaction new_ka, old_ka;
3374 compat_sigset_t mask;
3375#ifdef __ARCH_HAS_SA_RESTORER
3376 compat_uptr_t restorer;
3377#endif
3378 int ret;
3379
3380
3381 if (sigsetsize != sizeof(compat_sigset_t))
3382 return -EINVAL;
3383
3384 if (act) {
3385 compat_uptr_t handler;
3386 ret = get_user(handler, &act->sa_handler);
3387 new_ka.sa.sa_handler = compat_ptr(handler);
3388#ifdef __ARCH_HAS_SA_RESTORER
3389 ret |= get_user(restorer, &act->sa_restorer);
3390 new_ka.sa.sa_restorer = compat_ptr(restorer);
3391#endif
3392 ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
3393 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
3394 if (ret)
3395 return -EFAULT;
3396 sigset_from_compat(&new_ka.sa.sa_mask, &mask);
3397 }
3398
3399 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3400 if (!ret && oact) {
3401 sigset_to_compat(&mask, &old_ka.sa.sa_mask);
3402 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
3403 &oact->sa_handler);
3404 ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
3405 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
3406#ifdef __ARCH_HAS_SA_RESTORER
3407 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3408 &oact->sa_restorer);
3409#endif
3410 }
3411 return ret;
3412}
3413#endif
3414#endif
3415
3416#ifdef CONFIG_OLD_SIGACTION
3417SYSCALL_DEFINE3(sigaction, int, sig,
3418 const struct old_sigaction __user *, act,
3419 struct old_sigaction __user *, oact)
3420{
3421 struct k_sigaction new_ka, old_ka;
3422 int ret;
3423
3424 if (act) {
3425 old_sigset_t mask;
3426 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3427 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
3428 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
3429 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3430 __get_user(mask, &act->sa_mask))
3431 return -EFAULT;
3432#ifdef __ARCH_HAS_KA_RESTORER
3433 new_ka.ka_restorer = NULL;
3434#endif
3435 siginitset(&new_ka.sa.sa_mask, mask);
3436 }
3437
3438 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3439
3440 if (!ret && oact) {
3441 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3442 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
3443 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
3444 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3445 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3446 return -EFAULT;
3447 }
3448
3449 return ret;
3450}
3451#endif
3452#ifdef CONFIG_COMPAT_OLD_SIGACTION
3453COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
3454 const struct compat_old_sigaction __user *, act,
3455 struct compat_old_sigaction __user *, oact)
3456{
3457 struct k_sigaction new_ka, old_ka;
3458 int ret;
3459 compat_old_sigset_t mask;
3460 compat_uptr_t handler, restorer;
3461
3462 if (act) {
3463 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3464 __get_user(handler, &act->sa_handler) ||
3465 __get_user(restorer, &act->sa_restorer) ||
3466 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3467 __get_user(mask, &act->sa_mask))
3468 return -EFAULT;
3469
3470#ifdef __ARCH_HAS_KA_RESTORER
3471 new_ka.ka_restorer = NULL;
3472#endif
3473 new_ka.sa.sa_handler = compat_ptr(handler);
3474 new_ka.sa.sa_restorer = compat_ptr(restorer);
3475 siginitset(&new_ka.sa.sa_mask, mask);
3476 }
3477
3478 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3479
3480 if (!ret && oact) {
3481 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3482 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
3483 &oact->sa_handler) ||
3484 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3485 &oact->sa_restorer) ||
3486 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3487 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3488 return -EFAULT;
3489 }
3490 return ret;
3491}
3492#endif
3493
3494#ifdef CONFIG_SGETMASK_SYSCALL
3495
3496
3497
3498
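/*
 * For backwards compatibility only; functionality is superseded by
 * sigprocmask. Only the first word of the blocked mask is touched.
 */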
3499SYSCALL_DEFINE0(sgetmask)
3500{
3501
3502 return current->blocked.sig[0];
3503}
3504
3505SYSCALL_DEFINE1(ssetmask, int, newmask)
3506{
3507 int old = current->blocked.sig[0];
3508 sigset_t newset;
3509
3510 siginitset(&newset, newmask);
3511 set_current_blocked(&newset);
3512
3513 return old;
3514}
3515#endif
3516
3517#ifdef __ARCH_WANT_SYS_SIGNAL
3518
3519
3520
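/*
 * For backwards compatibility; functionality is superseded by sigaction.
 * Installs a one-shot, non-masking handler for @sig.
 */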
3521SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
3522{
3523 struct k_sigaction new_sa, old_sa;
3524 int ret;
3525
3526 new_sa.sa.sa_handler = handler;
3527 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
3528 sigemptyset(&new_sa.sa.sa_mask);
3529
3530 ret = do_sigaction(sig, &new_sa, &old_sa);
3531
3532 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3533}
3534#endif
3535
3536#ifdef __ARCH_WANT_SYS_PAUSE
3537
3538SYSCALL_DEFINE0(pause)
3539{
3540 while (!signal_pending(current)) {
3541 current->state = TASK_INTERRUPTIBLE;
3542 schedule();
3543 }
3544 return -ERESTARTNOHAND;
3545}
3546
3547#endif
3548
3549int sigsuspend(sigset_t *set)
3550{
3551 current->saved_sigmask = current->blocked;
3552 set_current_blocked(set);
3553
3554 current->state = TASK_INTERRUPTIBLE;
3555 schedule();
3556 set_restore_sigmask();
3557 return -ERESTARTNOHAND;
3558}
3559
3560
3561
3562
3563
3564
3565
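/**
 * sys_rt_sigsuspend - replace the signal mask with @unewset and wait
 *	until a signal is received
 * @unewset: new signal mask value
 * @sigsetsize: size of sigset_t type
 */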
3566SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
3567{
3568 sigset_t newset;
3569
3570
3571 if (sigsetsize != sizeof(sigset_t))
3572 return -EINVAL;
3573
3574 if (copy_from_user(&newset, unewset, sizeof(newset)))
3575 return -EFAULT;
3576 return sigsuspend(&newset);
3577}
3578
3579#ifdef CONFIG_COMPAT
3580COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
3581{
3582#ifdef __BIG_ENDIAN
3583 sigset_t newset;
3584 compat_sigset_t newset32;
3585
3586
3587 if (sigsetsize != sizeof(sigset_t))
3588 return -EINVAL;
3589
3590 if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
3591 return -EFAULT;
3592 sigset_from_compat(&newset, &newset32);
3593 return sigsuspend(&newset);
3594#else
3595
3596 return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize);
3597#endif
3598}
3599#endif
3600
3601#ifdef CONFIG_OLD_SIGSUSPEND
3602SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
3603{
3604 sigset_t blocked;
3605 siginitset(&blocked, mask);
3606 return sigsuspend(&blocked);
3607}
3608#endif
3609#ifdef CONFIG_OLD_SIGSUSPEND3
3610SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
3611{
3612 sigset_t blocked;
3613 siginitset(&blocked, mask);
3614 return sigsuspend(&blocked);
3615}
3616#endif
3617
3618__weak const char *arch_vma_name(struct vm_area_struct *vma)
3619{
3620 return NULL;
3621}
3622
3623void __init signals_init(void)
3624{
3625 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
3626}
3627
3628#ifdef CONFIG_KGDB_KDB
3629#include <linux/kdb.h>
3630
3631
3632
3633
3634
3635
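/*
 * kdb_send_sig_info - allows kdb to send signals without exposing
 * signal internals. Checks that the needed locks are available before
 * calling the main signal code, to avoid kdb deadlocks.
 */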
3636void
3637kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
3638{
3639 static struct task_struct *kdb_prev_t;
3640 int sig, new_t;
3641 if (!spin_trylock(&t->sighand->siglock)) {
3642 kdb_printf("Can't do kill command now.\n"
3643 "The sigmask lock is held somewhere else in "
3644 "kernel, try again later\n");
3645 return;
3646 }
3647 spin_unlock(&t->sighand->siglock);
3648 new_t = kdb_prev_t != t;
3649 kdb_prev_t = t;
3650 if (t->state != TASK_RUNNING && new_t) {
3651 kdb_printf("Process is not RUNNING, sending a signal from "
3652 "kdb risks deadlock\n"
3653 "on the run queue locks. "
3654 "The signal has _not_ been sent.\n"
3655 "Reissue the kill command if you want to risk "
3656 "the deadlock.\n");
3657 return;
3658 }
3659 sig = info->si_signo;
3660 if (send_sig_info(sig, info, t))
3661 kdb_printf("Fail to deliver Signal %d to process %d.\n",
3662 sig, t->pid);
3663 else
3664 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
3665}
3666#endif
3667