/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"
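
/*
 * SLAB cache used to allocate struct sigqueue entries for queued signals.
 */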
static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;
55
56static void __user *sig_handler(struct task_struct *t, int sig)
57{
58 return t->sighand->action[sig - 1].sa.sa_handler;
59}
60
61static int sig_handler_ignored(void __user *handler, int sig)
62{
63
64 return handler == SIG_IGN ||
65 (handler == SIG_DFL && sig_kernel_ignore(sig));
66}
67
68static int sig_task_ignored(struct task_struct *t, int sig, bool force)
69{
70 void __user *handler;
71
72 handler = sig_handler(t, sig);
73
74 if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
75 handler == SIG_DFL && !force)
76 return 1;
77
78 return sig_handler_ignored(handler, sig);
79}
80
81static int sig_ignored(struct task_struct *t, int sig, bool force)
82{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
88 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
89 return 0;
90
91 if (!sig_task_ignored(t, sig, force))
92 return 0;
93
	/*
	 * Tracers may want to know about even ignored signals.
	 */
97 return !t->ptrace;
98}
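
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */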
104static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
105{
106 unsigned long ready;
107 long i;
108
109 switch (_NSIG_WORDS) {
110 default:
111 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
112 ready |= signal->sig[i] &~ blocked->sig[i];
113 break;
114
115 case 4: ready = signal->sig[3] &~ blocked->sig[3];
116 ready |= signal->sig[2] &~ blocked->sig[2];
117 ready |= signal->sig[1] &~ blocked->sig[1];
118 ready |= signal->sig[0] &~ blocked->sig[0];
119 break;
120
121 case 2: ready = signal->sig[1] &~ blocked->sig[1];
122 ready |= signal->sig[0] &~ blocked->sig[0];
123 break;
124
125 case 1: ready = signal->sig[0] &~ blocked->sig[0];
126 }
127 return ready != 0;
128}
129
130#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
131
132static int recalc_sigpending_tsk(struct task_struct *t)
133{
134 if ((t->jobctl & JOBCTL_PENDING_MASK) ||
135 PENDING(&t->pending, &t->blocked) ||
136 PENDING(&t->signal->shared_pending, &t->blocked)) {
137 set_tsk_thread_flag(t, TIF_SIGPENDING);
138 return 1;
139 }
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
145 return 0;
146}
147
148
149
150
151
152void recalc_sigpending_and_wake(struct task_struct *t)
153{
154 if (recalc_sigpending_tsk(t))
155 signal_wake_up(t, 0);
156}
157
158void recalc_sigpending(void)
159{
160 if (!recalc_sigpending_tsk(current) && !freezing(current))
161 clear_thread_flag(TIF_SIGPENDING);
162
163}
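
/* Given the mask, find the first available signal that should be serviced. */
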
167#define SYNCHRONOUS_MASK \
168 (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
169 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
170
171int next_signal(struct sigpending *pending, sigset_t *mask)
172{
173 unsigned long i, *s, *m, x;
174 int sig = 0;
175
176 s = pending->signal.sig;
177 m = mask->sig;
178
	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
183 x = *s &~ *m;
184 if (x) {
185 if (x & SYNCHRONOUS_MASK)
186 x &= SYNCHRONOUS_MASK;
187 sig = ffz(~x) + 1;
188 return sig;
189 }
190
191 switch (_NSIG_WORDS) {
192 default:
193 for (i = 1; i < _NSIG_WORDS; ++i) {
194 x = *++s &~ *++m;
195 if (!x)
196 continue;
197 sig = ffz(~x) + i*_NSIG_BPW + 1;
198 break;
199 }
200 break;
201
202 case 2:
203 x = s[1] &~ m[1];
204 if (!x)
205 break;
206 sig = ffz(~x) + _NSIG_BPW + 1;
207 break;
208
209 case 1:
210
211 break;
212 }
213
214 return sig;
215}
216
217static inline void print_dropped_signal(int sig)
218{
219 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
220
221 if (!print_fatal_signals)
222 return;
223
224 if (!__ratelimit(&ratelimit_state))
225 return;
226
227 printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
228 current->comm, current->pid, sig);
229}
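
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, the previous one is
 * cleared first.  Becomes a no-op if @task is already being killed or
 * exiting.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */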
248bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask)
249{
250 BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
251 JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
252 BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
253
254 if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
255 return false;
256
257 if (mask & JOBCTL_STOP_SIGMASK)
258 task->jobctl &= ~JOBCTL_STOP_SIGMASK;
259
260 task->jobctl |= mask;
261 return true;
262}
263
264
265
266
267
268
269
270
271
272
273
274
275
276void task_clear_jobctl_trapping(struct task_struct *task)
277{
278 if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
279 task->jobctl &= ~JOBCTL_TRAPPING;
280 smp_mb();
281 wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
282 }
283}
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask)
301{
302 BUG_ON(mask & ~JOBCTL_PENDING_MASK);
303
304 if (mask & JOBCTL_STOP_PENDING)
305 mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
306
307 task->jobctl &= ~mask;
308
309 if (!(task->jobctl & JOBCTL_PENDING_MASK))
310 task_clear_jobctl_trapping(task);
311}
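
/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in and consuming a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */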
329static bool task_participate_group_stop(struct task_struct *task)
330{
331 struct signal_struct *sig = task->signal;
332 bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
333
334 WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
335
336 task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
337
338 if (!consume)
339 return false;
340
341 if (!WARN_ON_ONCE(sig->group_stop_count == 0))
342 sig->group_stop_count--;
343
344
345
346
347
348 if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
349 sig->flags = SIGNAL_STOP_STOPPED;
350 return true;
351 }
352 return false;
353}
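
/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */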
360static struct sigqueue *
361__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
362{
363 struct sigqueue *q = NULL;
364 struct user_struct *user;
365
366
367
368
369
370 rcu_read_lock();
371 user = get_uid(__task_cred(t)->user);
372 atomic_inc(&user->sigpending);
373 rcu_read_unlock();
374
375 if (override_rlimit ||
376 atomic_read(&user->sigpending) <=
377 task_rlimit(t, RLIMIT_SIGPENDING)) {
378 q = kmem_cache_alloc(sigqueue_cachep, flags);
379 } else {
380 print_dropped_signal(sig);
381 }
382
383 if (unlikely(q == NULL)) {
384 atomic_dec(&user->sigpending);
385 free_uid(user);
386 } else {
387 INIT_LIST_HEAD(&q->list);
388 q->flags = 0;
389 q->user = user;
390 }
391
392 return q;
393}
394
395static void __sigqueue_free(struct sigqueue *q)
396{
397 if (q->flags & SIGQUEUE_PREALLOC)
398 return;
399 atomic_dec(&q->user->sigpending);
400 free_uid(q->user);
401 kmem_cache_free(sigqueue_cachep, q);
402}
403
404void flush_sigqueue(struct sigpending *queue)
405{
406 struct sigqueue *q;
407
408 sigemptyset(&queue->signal);
409 while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
411 list_del_init(&q->list);
412 __sigqueue_free(q);
413 }
414}
415
416
417
418
419void __flush_signals(struct task_struct *t)
420{
421 clear_tsk_thread_flag(t, TIF_SIGPENDING);
422 flush_sigqueue(&t->pending);
423 flush_sigqueue(&t->signal->shared_pending);
424}
425
426void flush_signals(struct task_struct *t)
427{
428 unsigned long flags;
429
430 spin_lock_irqsave(&t->sighand->siglock, flags);
431 __flush_signals(t);
432 spin_unlock_irqrestore(&t->sighand->siglock, flags);
433}
434
435static void __flush_itimer_signals(struct sigpending *pending)
436{
437 sigset_t signal, retain;
438 struct sigqueue *q, *n;
439
440 signal = pending->signal;
441 sigemptyset(&retain);
442
443 list_for_each_entry_safe(q, n, &pending->list, list) {
444 int sig = q->info.si_signo;
445
446 if (likely(q->info.si_code != SI_TIMER)) {
447 sigaddset(&retain, sig);
448 } else {
449 sigdelset(&signal, sig);
450 list_del_init(&q->list);
451 __sigqueue_free(q);
452 }
453 }
454
455 sigorsets(&pending->signal, &signal, &retain);
456}
457
458void flush_itimer_signals(void)
459{
460 struct task_struct *tsk = current;
461 unsigned long flags;
462
463 spin_lock_irqsave(&tsk->sighand->siglock, flags);
464 __flush_itimer_signals(&tsk->pending);
465 __flush_itimer_signals(&tsk->signal->shared_pending);
466 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
467}
468
469void ignore_signals(struct task_struct *t)
470{
471 int i;
472
473 for (i = 0; i < _NSIG; ++i)
474 t->sighand->action[i].sa.sa_handler = SIG_IGN;
475
476 flush_signals(t);
477}
478
479
480
481
482
483void
484flush_signal_handlers(struct task_struct *t, int force_default)
485{
486 int i;
487 struct k_sigaction *ka = &t->sighand->action[0];
488 for (i = _NSIG ; i != 0 ; i--) {
489 if (force_default || ka->sa.sa_handler != SIG_IGN)
490 ka->sa.sa_handler = SIG_DFL;
491 ka->sa.sa_flags = 0;
492#ifdef __ARCH_HAS_SA_RESTORER
493 ka->sa.sa_restorer = NULL;
494#endif
495 sigemptyset(&ka->sa.sa_mask);
496 ka++;
497 }
498}
499
500int unhandled_signal(struct task_struct *tsk, int sig)
501{
502 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
503 if (is_global_init(tsk))
504 return 1;
505 if (handler != SIG_IGN && handler != SIG_DFL)
506 return 0;
507
508 return !tsk->ptrace;
509}
510
511
512
513
514
515
516
517
518
519
520void
521block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
522{
523 unsigned long flags;
524
	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
530}
531
532
533
534void
535unblock_all_signals(void)
536{
537 unsigned long flags;
538
	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
544}
545
546static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
547{
548 struct sigqueue *q, *first = NULL;
549
550
551
552
553
554 list_for_each_entry(q, &list->list, list) {
555 if (q->info.si_signo == sig) {
556 if (first)
557 goto still_pending;
558 first = q;
559 }
560 }
561
562 sigdelset(&list->signal, sig);
563
564 if (first) {
565still_pending:
566 list_del_init(&first->list);
567 copy_siginfo(info, &first->info);
568 __sigqueue_free(first);
569 } else {
570
571
572
573
574
575 info->si_signo = sig;
576 info->si_errno = 0;
577 info->si_code = SI_USER;
578 info->si_pid = 0;
579 info->si_uid = 0;
580 }
581}
582
583static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
584 siginfo_t *info)
585{
586 int sig = next_signal(pending, mask);
587
588 if (sig) {
589 if (current->notifier) {
590 if (sigismember(current->notifier_mask, sig)) {
591 if (!(current->notifier)(current->notifier_data)) {
592 clear_thread_flag(TIF_SIGPENDING);
593 return 0;
594 }
595 }
596 }
597
598 collect_signal(sig, pending, info);
599 }
600
601 return sig;
602}
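
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */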
610int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
611{
612 int signr;
613
614
615
616
617 signr = __dequeue_signal(&tsk->pending, mask, info);
618 if (!signr) {
619 signr = __dequeue_signal(&tsk->signal->shared_pending,
620 mask, info);
621
622
623
624
625
626
627
628
629
630
631
632
633
634 if (unlikely(signr == SIGALRM)) {
635 struct hrtimer *tmr = &tsk->signal->real_timer;
636
637 if (!hrtimer_is_queued(tmr) &&
638 tsk->signal->it_real_incr.tv64 != 0) {
639 hrtimer_forward(tmr, tmr->base->get_time(),
640 tsk->signal->it_real_incr);
641 hrtimer_restart(tmr);
642 }
643 }
644 }
645
646 recalc_sigpending();
647 if (!signr)
648 return 0;
649
650 if (unlikely(sig_kernel_stop(signr))) {
651
652
653
654
655
656
657
658
659
660
661
662
663 current->jobctl |= JOBCTL_STOP_DEQUEUED;
664 }
665 if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
666
667
668
669
670
671
672 spin_unlock(&tsk->sighand->siglock);
673 do_schedule_next_timer(info);
674 spin_lock(&tsk->sighand->siglock);
675 }
676 return signr;
677}
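
/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */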
690void signal_wake_up_state(struct task_struct *t, unsigned int state)
691{
692 set_tsk_thread_flag(t, TIF_SIGPENDING);
693
694
695
696
697
698
699
700 if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
701 kick_process(t);
702}
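
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */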
710static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
711{
712 struct sigqueue *q, *n;
713 sigset_t m;
714
715 sigandsets(&m, mask, &s->signal);
716 if (sigisemptyset(&m))
717 return 0;
718
719 sigandnsets(&s->signal, &s->signal, mask);
720 list_for_each_entry_safe(q, n, &s->list, list) {
721 if (sigismember(mask, q->info.si_signo)) {
722 list_del_init(&q->list);
723 __sigqueue_free(q);
724 }
725 }
726 return 1;
727}
728
729static inline int is_si_special(const struct siginfo *info)
730{
731 return info <= SEND_SIG_FORCED;
732}
733
734static inline bool si_fromuser(const struct siginfo *info)
735{
736 return info == SEND_SIG_NOINFO ||
737 (!is_si_special(info) && SI_FROMUSER(info));
738}
739
740
741
742
743static int kill_ok_by_cred(struct task_struct *t)
744{
745 const struct cred *cred = current_cred();
746 const struct cred *tcred = __task_cred(t);
747
748 if (uid_eq(cred->euid, tcred->suid) ||
749 uid_eq(cred->euid, tcred->uid) ||
750 uid_eq(cred->uid, tcred->suid) ||
751 uid_eq(cred->uid, tcred->uid))
752 return 1;
753
754 if (ns_capable(tcred->user_ns, CAP_KILL))
755 return 1;
756
757 return 0;
758}
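
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */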
764static int check_kill_permission(int sig, struct siginfo *info,
765 struct task_struct *t)
766{
767 struct pid *sid;
768 int error;
769
770 if (!valid_signal(sig))
771 return -EINVAL;
772
773 if (!si_fromuser(info))
774 return 0;
775
776 error = audit_signal_info(sig, t);
777 if (error)
778 return error;
779
780 if (!same_thread_group(current, t) &&
781 !kill_ok_by_cred(t)) {
782 switch (sig) {
783 case SIGCONT:
784 sid = task_session(t);
785
786
787
788
789 if (!sid || sid == task_session(current))
790 break;
791 default:
792 return -EPERM;
793 }
794 }
795
796 return security_task_kill(t, info, sig, 0);
797}
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816static void ptrace_trap_notify(struct task_struct *t)
817{
818 WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
819 assert_spin_locked(&t->sighand->siglock);
820
821 task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
822 ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
823}
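
/*
 * Adjust signal state before queueing @sig for @p: stop signals flush any
 * queued SIGCONT, and SIGCONT flushes queued stop signals and wakes up
 * stopped threads.  Returns true if the signal should actually be
 * delivered, otherwise it should be dropped.
 */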
835static bool prepare_signal(int sig, struct task_struct *p, bool force)
836{
837 struct signal_struct *signal = p->signal;
838 struct task_struct *t;
839 sigset_t flush;
840
841 if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
842 if (signal->flags & SIGNAL_GROUP_COREDUMP)
843 return sig == SIGKILL;
844
845
846
847 } else if (sig_kernel_stop(sig)) {
848
849
850
851 siginitset(&flush, sigmask(SIGCONT));
852 flush_sigqueue_mask(&flush, &signal->shared_pending);
853 for_each_thread(p, t)
854 flush_sigqueue_mask(&flush, &t->pending);
855 } else if (sig == SIGCONT) {
856 unsigned int why;
857
858
859
860 siginitset(&flush, SIG_KERNEL_STOP_MASK);
861 flush_sigqueue_mask(&flush, &signal->shared_pending);
862 for_each_thread(p, t) {
863 flush_sigqueue_mask(&flush, &t->pending);
864 task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
865 if (likely(!(t->ptrace & PT_SEIZED)))
866 wake_up_state(t, __TASK_STOPPED);
867 else
868 ptrace_trap_notify(t);
869 }
870
871
872
873
874
875
876
877
878
879 why = 0;
880 if (signal->flags & SIGNAL_STOP_STOPPED)
881 why |= SIGNAL_CLD_CONTINUED;
882 else if (signal->group_stop_count)
883 why |= SIGNAL_CLD_STOPPED;
884
885 if (why) {
886
887
888
889
890
891 signal->flags = why | SIGNAL_STOP_CONTINUED;
892 signal->group_stop_count = 0;
893 signal->group_exit_code = 0;
894 }
895 }
896
897 return !sig_ignored(p, sig, force);
898}
899
900
901
902
903
904
905
906
907
908static inline int wants_signal(int sig, struct task_struct *p)
909{
910 if (sigismember(&p->blocked, sig))
911 return 0;
912 if (p->flags & PF_EXITING)
913 return 0;
914 if (sig == SIGKILL)
915 return 1;
916 if (task_is_stopped_or_traced(p))
917 return 0;
918 return task_curr(p) || !signal_pending(p);
919}
920
921static void complete_signal(int sig, struct task_struct *p, int group)
922{
923 struct signal_struct *signal = p->signal;
924 struct task_struct *t;
925
926
927
928
929
930
931
932 if (wants_signal(sig, p))
933 t = p;
934 else if (!group || thread_group_empty(p))
935
936
937
938
939 return;
940 else {
941
942
943
944 t = signal->curr_target;
945 while (!wants_signal(sig, t)) {
946 t = next_thread(t);
947 if (t == signal->curr_target)
948
949
950
951
952
953 return;
954 }
955 signal->curr_target = t;
956 }
957
958
959
960
961
962 if (sig_fatal(p, sig) &&
963 !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
964 !sigismember(&t->real_blocked, sig) &&
965 (sig == SIGKILL || !t->ptrace)) {
966
967
968
969 if (!sig_kernel_coredump(sig)) {
970
971
972
973
974
975
976 signal->flags = SIGNAL_GROUP_EXIT;
977 signal->group_exit_code = sig;
978 signal->group_stop_count = 0;
979 t = p;
980 do {
981 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
982 sigaddset(&t->pending.signal, SIGKILL);
983 signal_wake_up(t, 1);
984 } while_each_thread(p, t);
985 return;
986 }
987 }
988
989
990
991
992
993 signal_wake_up(t, sig == SIGKILL);
994 return;
995}
996
997static inline int legacy_queue(struct sigpending *signals, int sig)
998{
999 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1000}
1001
1002#ifdef CONFIG_USER_NS
1003static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
1004{
1005 if (current_user_ns() == task_cred_xxx(t, user_ns))
1006 return;
1007
1008 if (SI_FROMKERNEL(info))
1009 return;
1010
1011 rcu_read_lock();
1012 info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
1013 make_kuid(current_user_ns(), info->si_uid));
1014 rcu_read_unlock();
1015}
1016#else
1017static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
1018{
1019 return;
1020}
1021#endif
1022
1023static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
1024 int group, int from_ancestor_ns)
1025{
1026 struct sigpending *pending;
1027 struct sigqueue *q;
1028 int override_rlimit;
1029 int ret = 0, result;
1030
1031 assert_spin_locked(&t->sighand->siglock);
1032
1033 result = TRACE_SIGNAL_IGNORED;
1034 if (!prepare_signal(sig, t,
1035 from_ancestor_ns || (info == SEND_SIG_FORCED)))
1036 goto ret;
1037
1038 pending = group ? &t->signal->shared_pending : &t->pending;
1039
1040
1041
1042
1043
1044 result = TRACE_SIGNAL_ALREADY_PENDING;
1045 if (legacy_queue(pending, sig))
1046 goto ret;
1047
1048 result = TRACE_SIGNAL_DELIVERED;
1049
1050
1051
1052
1053 if (info == SEND_SIG_FORCED)
1054 goto out_set;
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065 if (sig < SIGRTMIN)
1066 override_rlimit = (is_si_special(info) || info->si_code >= 0);
1067 else
1068 override_rlimit = 0;
1069
1070 q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
1071 override_rlimit);
1072 if (q) {
1073 list_add_tail(&q->list, &pending->list);
1074 switch ((unsigned long) info) {
1075 case (unsigned long) SEND_SIG_NOINFO:
1076 q->info.si_signo = sig;
1077 q->info.si_errno = 0;
1078 q->info.si_code = SI_USER;
1079 q->info.si_pid = task_tgid_nr_ns(current,
1080 task_active_pid_ns(t));
1081 q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
1082 break;
1083 case (unsigned long) SEND_SIG_PRIV:
1084 q->info.si_signo = sig;
1085 q->info.si_errno = 0;
1086 q->info.si_code = SI_KERNEL;
1087 q->info.si_pid = 0;
1088 q->info.si_uid = 0;
1089 break;
1090 default:
1091 copy_siginfo(&q->info, info);
1092 if (from_ancestor_ns)
1093 q->info.si_pid = 0;
1094 break;
1095 }
1096
1097 userns_fixup_signal_uid(&q->info, t);
1098
1099 } else if (!is_si_special(info)) {
1100 if (sig >= SIGRTMIN && info->si_code != SI_USER) {
1101
1102
1103
1104
1105
1106 result = TRACE_SIGNAL_OVERFLOW_FAIL;
1107 ret = -EAGAIN;
1108 goto ret;
1109 } else {
1110
1111
1112
1113
1114 result = TRACE_SIGNAL_LOSE_INFO;
1115 }
1116 }
1117
1118out_set:
1119 signalfd_notify(t, sig);
1120 sigaddset(&pending->signal, sig);
1121 complete_signal(sig, t, group);
1122ret:
1123 trace_signal_generate(sig, info, t, group, result);
1124 return ret;
1125}
1126
1127static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
1128 int group)
1129{
1130 int from_ancestor_ns = 0;
1131
1132#ifdef CONFIG_PID_NS
1133 from_ancestor_ns = si_fromuser(info) &&
1134 !task_pid_nr_ns(current, task_active_pid_ns(t));
1135#endif
1136
1137 return __send_signal(sig, info, t, group, from_ancestor_ns);
1138}
1139
1140static void print_fatal_signal(int signr)
1141{
1142 struct pt_regs *regs = signal_pt_regs();
1143 printk(KERN_INFO "potentially unexpected fatal signal %d.\n", signr);
1144
1145#if defined(__i386__) && !defined(__arch_um__)
1146 printk(KERN_INFO "code at %08lx: ", regs->ip);
1147 {
1148 int i;
1149 for (i = 0; i < 16; i++) {
1150 unsigned char insn;
1151
1152 if (get_user(insn, (unsigned char *)(regs->ip + i)))
1153 break;
1154 printk(KERN_CONT "%02x ", insn);
1155 }
1156 }
1157 printk(KERN_CONT "\n");
1158#endif
1159 preempt_disable();
1160 show_regs(regs);
1161 preempt_enable();
1162}
1163
1164static int __init setup_print_fatal_signals(char *str)
1165{
1166 get_option (&str, &print_fatal_signals);
1167
1168 return 1;
1169}
1170
1171__setup("print-fatal-signals=", setup_print_fatal_signals);
1172
1173int
1174__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1175{
1176 return send_signal(sig, info, p, 1);
1177}
1178
1179static int
1180specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1181{
1182 return send_signal(sig, info, t, 0);
1183}
1184
1185int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1186 bool group)
1187{
1188 unsigned long flags;
1189 int ret = -ESRCH;
1190
1191 if (lock_task_sighand(p, &flags)) {
1192 ret = send_signal(sig, info, p, group);
1193 unlock_task_sighand(p, &flags);
1194 }
1195
1196 return ret;
1197}
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210int
1211force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1212{
1213 unsigned long int flags;
1214 int ret, blocked, ignored;
1215 struct k_sigaction *action;
1216
1217 spin_lock_irqsave(&t->sighand->siglock, flags);
1218 action = &t->sighand->action[sig-1];
1219 ignored = action->sa.sa_handler == SIG_IGN;
1220 blocked = sigismember(&t->blocked, sig);
1221 if (blocked || ignored) {
1222 action->sa.sa_handler = SIG_DFL;
1223 if (blocked) {
1224 sigdelset(&t->blocked, sig);
1225 recalc_sigpending_and_wake(t);
1226 }
1227 }
1228 if (action->sa.sa_handler == SIG_DFL)
1229 t->signal->flags &= ~SIGNAL_UNKILLABLE;
1230 ret = specific_send_sig_info(sig, info, t);
1231 spin_unlock_irqrestore(&t->sighand->siglock, flags);
1232
1233 return ret;
1234}
1235
1236
1237
1238
1239int zap_other_threads(struct task_struct *p)
1240{
1241 struct task_struct *t = p;
1242 int count = 0;
1243
1244 p->signal->group_stop_count = 0;
1245
1246 while_each_thread(p, t) {
1247 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1248 count++;
1249
1250
1251 if (t->exit_state)
1252 continue;
1253 sigaddset(&t->pending.signal, SIGKILL);
1254 signal_wake_up(t, 1);
1255 }
1256
1257 return count;
1258}
1259
1260struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1261 unsigned long *flags)
1262{
1263 struct sighand_struct *sighand;
1264
1265 for (;;) {
1266
1267
1268
1269
1270 local_irq_save(*flags);
1271 rcu_read_lock();
1272 sighand = rcu_dereference(tsk->sighand);
1273 if (unlikely(sighand == NULL)) {
1274 rcu_read_unlock();
1275 local_irq_restore(*flags);
1276 break;
1277 }
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289 spin_lock(&sighand->siglock);
1290 if (likely(sighand == tsk->sighand)) {
1291 rcu_read_unlock();
1292 break;
1293 }
1294 spin_unlock(&sighand->siglock);
1295 rcu_read_unlock();
1296 local_irq_restore(*flags);
1297 }
1298
1299 return sighand;
1300}
1301
1302
1303
1304
1305int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1306{
1307 int ret;
1308
1309 rcu_read_lock();
1310 ret = check_kill_permission(sig, info, p);
1311 rcu_read_unlock();
1312
1313 if (!ret && sig)
1314 ret = do_send_sig_info(sig, info, p, true);
1315
1316 return ret;
1317}
1318
1319
1320
1321
1322
1323
1324int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1325{
1326 struct task_struct *p = NULL;
1327 int retval, success;
1328
1329 success = 0;
1330 retval = -ESRCH;
1331 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1332 int err = group_send_sig_info(sig, info, p);
1333 success |= !err;
1334 retval = err;
1335 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1336 return success ? 0 : retval;
1337}
1338
1339int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1340{
1341 int error = -ESRCH;
1342 struct task_struct *p;
1343
1344 for (;;) {
1345 rcu_read_lock();
1346 p = pid_task(pid, PIDTYPE_PID);
1347 if (p)
1348 error = group_send_sig_info(sig, info, p);
1349 rcu_read_unlock();
1350 if (likely(!p || error != -ESRCH))
1351 return error;
1352
1353
1354
1355
1356
1357
1358 }
1359}
1360
1361int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1362{
1363 int error;
1364 rcu_read_lock();
1365 error = kill_pid_info(sig, info, find_vpid(pid));
1366 rcu_read_unlock();
1367 return error;
1368}
1369
1370static int kill_as_cred_perm(const struct cred *cred,
1371 struct task_struct *target)
1372{
1373 const struct cred *pcred = __task_cred(target);
1374 if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
1375 !uid_eq(cred->uid, pcred->suid) && !uid_eq(cred->uid, pcred->uid))
1376 return 0;
1377 return 1;
1378}
1379
1380
1381int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
1382 const struct cred *cred, u32 secid)
1383{
1384 int ret = -EINVAL;
1385 struct task_struct *p;
1386 unsigned long flags;
1387
1388 if (!valid_signal(sig))
1389 return ret;
1390
1391 rcu_read_lock();
1392 p = pid_task(pid, PIDTYPE_PID);
1393 if (!p) {
1394 ret = -ESRCH;
1395 goto out_unlock;
1396 }
1397 if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
1398 ret = -EPERM;
1399 goto out_unlock;
1400 }
1401 ret = security_task_kill(p, info, sig, secid);
1402 if (ret)
1403 goto out_unlock;
1404
1405 if (sig) {
1406 if (lock_task_sighand(p, &flags)) {
1407 ret = __send_signal(sig, info, p, 1, 0);
1408 unlock_task_sighand(p, &flags);
1409 } else
1410 ret = -ESRCH;
1411 }
1412out_unlock:
1413 rcu_read_unlock();
1414 return ret;
1415}
1416EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
1417
1418
1419
1420
1421
1422
1423
1424
1425static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1426{
1427 int ret;
1428
1429 if (pid > 0) {
1430 rcu_read_lock();
1431 ret = kill_pid_info(sig, info, find_vpid(pid));
1432 rcu_read_unlock();
1433 return ret;
1434 }
1435
1436 read_lock(&tasklist_lock);
1437 if (pid != -1) {
1438 ret = __kill_pgrp_info(sig, info,
1439 pid ? find_vpid(-pid) : task_pgrp(current));
1440 } else {
1441 int retval = 0, count = 0;
1442 struct task_struct * p;
1443
1444 for_each_process(p) {
1445 if (task_pid_vnr(p) > 1 &&
1446 !same_thread_group(p, current)) {
1447 int err = group_send_sig_info(sig, info, p);
1448 ++count;
1449 if (err != -EPERM)
1450 retval = err;
1451 }
1452 }
1453 ret = count ? retval : -ESRCH;
1454 }
1455 read_unlock(&tasklist_lock);
1456
1457 return ret;
1458}
1459
1460
1461
1462
1463
1464int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1465{
1466
1467
1468
1469
1470 if (!valid_signal(sig))
1471 return -EINVAL;
1472
1473 return do_send_sig_info(sig, info, p, false);
1474}
1475
1476#define __si_special(priv) \
1477 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1478
1479int
1480send_sig(int sig, struct task_struct *p, int priv)
1481{
1482 return send_sig_info(sig, __si_special(priv), p);
1483}
1484
1485void
1486force_sig(int sig, struct task_struct *p)
1487{
1488 force_sig_info(sig, SEND_SIG_PRIV, p);
1489}
1490
1491
1492
1493
1494
1495
1496
1497int
1498force_sigsegv(int sig, struct task_struct *p)
1499{
1500 if (sig == SIGSEGV) {
1501 unsigned long flags;
1502 spin_lock_irqsave(&p->sighand->siglock, flags);
1503 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1504 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1505 }
1506 force_sig(SIGSEGV, p);
1507 return 0;
1508}
1509
1510int kill_pgrp(struct pid *pid, int sig, int priv)
1511{
1512 int ret;
1513
1514 read_lock(&tasklist_lock);
1515 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1516 read_unlock(&tasklist_lock);
1517
1518 return ret;
1519}
1520EXPORT_SYMBOL(kill_pgrp);
1521
1522int kill_pid(struct pid *pid, int sig, int priv)
1523{
1524 return kill_pid_info(sig, __si_special(priv), pid);
1525}
1526EXPORT_SYMBOL(kill_pid);
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537struct sigqueue *sigqueue_alloc(void)
1538{
1539 struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1540
1541 if (q)
1542 q->flags |= SIGQUEUE_PREALLOC;
1543
1544 return q;
1545}
1546
1547void sigqueue_free(struct sigqueue *q)
1548{
1549 unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;
1551
1552 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1553
1554
1555
1556
1557
1558 spin_lock_irqsave(lock, flags);
1559 q->flags &= ~SIGQUEUE_PREALLOC;
1560
1561
1562
1563
1564 if (!list_empty(&q->list))
1565 q = NULL;
1566 spin_unlock_irqrestore(lock, flags);
1567
1568 if (q)
1569 __sigqueue_free(q);
1570}
1571
1572int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1573{
1574 int sig = q->info.si_signo;
1575 struct sigpending *pending;
1576 unsigned long flags;
1577 int ret, result;
1578
1579 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1580
1581 ret = -1;
1582 if (!likely(lock_task_sighand(t, &flags)))
1583 goto ret;
1584
1585 ret = 1;
1586 result = TRACE_SIGNAL_IGNORED;
1587 if (!prepare_signal(sig, t, false))
1588 goto out;
1589
1590 ret = 0;
1591 if (unlikely(!list_empty(&q->list))) {
1592
1593
1594
1595
1596 BUG_ON(q->info.si_code != SI_TIMER);
1597 q->info.si_overrun++;
1598 result = TRACE_SIGNAL_ALREADY_PENDING;
1599 goto out;
1600 }
1601 q->info.si_overrun = 0;
1602
1603 signalfd_notify(t, sig);
1604 pending = group ? &t->signal->shared_pending : &t->pending;
1605 list_add_tail(&q->list, &pending->list);
1606 sigaddset(&pending->signal, sig);
1607 complete_signal(sig, t, group);
1608 result = TRACE_SIGNAL_DELIVERED;
1609out:
1610 trace_signal_generate(sig, &q->info, t, group, result);
1611 unlock_task_sighand(t, &flags);
1612ret:
1613 return ret;
1614}
1615
1616
1617
1618
1619
1620
1621
1622
1623bool do_notify_parent(struct task_struct *tsk, int sig)
1624{
1625 struct siginfo info;
1626 unsigned long flags;
1627 struct sighand_struct *psig;
1628 bool autoreap = false;
1629 cputime_t utime, stime;
1630
1631 BUG_ON(sig == -1);
1632
1633
1634 BUG_ON(task_is_stopped_or_traced(tsk));
1635
1636 BUG_ON(!tsk->ptrace &&
1637 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1638
1639 if (sig != SIGCHLD) {
1640
1641
1642
1643
1644 if (tsk->parent_exec_id != tsk->parent->self_exec_id)
1645 sig = SIGCHLD;
1646 }
1647
1648 info.si_signo = sig;
1649 info.si_errno = 0;
1650
1651
1652
1653
1654
1655
1656
1657
1658
1659
1660
1661 rcu_read_lock();
1662 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
1663 info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
1664 task_uid(tsk));
1665 rcu_read_unlock();
1666
1667 task_cputime(tsk, &utime, &stime);
1668 info.si_utime = cputime_to_clock_t(utime + tsk->signal->utime);
1669 info.si_stime = cputime_to_clock_t(stime + tsk->signal->stime);
1670
1671 info.si_status = tsk->exit_code & 0x7f;
1672 if (tsk->exit_code & 0x80)
1673 info.si_code = CLD_DUMPED;
1674 else if (tsk->exit_code & 0x7f)
1675 info.si_code = CLD_KILLED;
1676 else {
1677 info.si_code = CLD_EXITED;
1678 info.si_status = tsk->exit_code >> 8;
1679 }
1680
1681 psig = tsk->parent->sighand;
1682 spin_lock_irqsave(&psig->siglock, flags);
1683 if (!tsk->ptrace && sig == SIGCHLD &&
1684 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1685 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701 autoreap = true;
1702 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1703 sig = 0;
1704 }
1705 if (valid_signal(sig) && sig)
1706 __group_send_sig_info(sig, &info, tsk->parent);
1707 __wake_up_parent(tsk, tsk->parent);
1708 spin_unlock_irqrestore(&psig->siglock, flags);
1709
1710 return autoreap;
1711}
1712
1713
1714
1715
1716
1717
1718
1719
1720
1721
1722
1723
1724
1725
1726static void do_notify_parent_cldstop(struct task_struct *tsk,
1727 bool for_ptracer, int why)
1728{
1729 struct siginfo info;
1730 unsigned long flags;
1731 struct task_struct *parent;
1732 struct sighand_struct *sighand;
1733 cputime_t utime, stime;
1734
1735 if (for_ptracer) {
1736 parent = tsk->parent;
1737 } else {
1738 tsk = tsk->group_leader;
1739 parent = tsk->real_parent;
1740 }
1741
1742 info.si_signo = SIGCHLD;
1743 info.si_errno = 0;
1744
1745
1746
1747 rcu_read_lock();
1748 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
1749 info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
1750 rcu_read_unlock();
1751
1752 task_cputime(tsk, &utime, &stime);
1753 info.si_utime = cputime_to_clock_t(utime);
1754 info.si_stime = cputime_to_clock_t(stime);
1755
1756 info.si_code = why;
1757 switch (why) {
1758 case CLD_CONTINUED:
1759 info.si_status = SIGCONT;
1760 break;
1761 case CLD_STOPPED:
1762 info.si_status = tsk->signal->group_exit_code & 0x7f;
1763 break;
1764 case CLD_TRAPPED:
1765 info.si_status = tsk->exit_code & 0x7f;
1766 break;
1767 default:
1768 BUG();
1769 }
1770
1771 sighand = parent->sighand;
1772 spin_lock_irqsave(&sighand->siglock, flags);
1773 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1774 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1775 __group_send_sig_info(SIGCHLD, &info, parent);
1776
1777
1778
1779 __wake_up_parent(tsk, parent);
1780 spin_unlock_irqrestore(&sighand->siglock, flags);
1781}
1782
1783static inline int may_ptrace_stop(void)
1784{
1785 if (!likely(current->ptrace))
1786 return 0;
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800 if (unlikely(current->mm->core_state) &&
1801 unlikely(current->mm == current->parent->mm))
1802 return 0;
1803
1804 return 1;
1805}
1806
1807
1808
1809
1810
1811static int sigkill_pending(struct task_struct *tsk)
1812{
1813 return sigismember(&tsk->pending.signal, SIGKILL) ||
1814 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1815}
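
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */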
1828static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
1831{
1832 bool gstop_done = false;
1833
1834 if (arch_ptrace_stop_needed(exit_code, info)) {
1835
1836
1837
1838
1839
1840
1841
1842
1843
1844
1845
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
1849 if (sigkill_pending(current))
1850 return;
1851 }
1852
1853
1854
1855
1856
1857
1858
1859
1860 set_current_state(TASK_TRACED);
1861
1862 current->last_siginfo = info;
1863 current->exit_code = exit_code;
1864
1865
1866
1867
1868
1869
1870
1871
1872 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
1873 gstop_done = task_participate_group_stop(current);
1874
1875
1876 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
1877 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
1878 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
1879
1880
1881 task_clear_jobctl_trapping(current);
1882
	spin_unlock_irq(&current->sighand->siglock);
1884 read_lock(&tasklist_lock);
1885 if (may_ptrace_stop()) {
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896 do_notify_parent_cldstop(current, true, why);
1897 if (gstop_done && ptrace_reparented(current))
1898 do_notify_parent_cldstop(current, false, why);
1899
1900
1901
1902
1903
1904
1905
1906 preempt_disable();
1907 read_unlock(&tasklist_lock);
1908 preempt_enable_no_resched();
1909 freezable_schedule();
1910 } else {
1911
1912
1913
1914
1915
1916
1917
1918
1919
1920
1921 if (gstop_done)
1922 do_notify_parent_cldstop(current, false, why);
1923
1924
1925 __set_current_state(TASK_RUNNING);
1926 if (clear_code)
1927 current->exit_code = 0;
1928 read_unlock(&tasklist_lock);
1929 }
1930
1931
1932
1933
1934
1935
	spin_lock_irq(&current->sighand->siglock);
1937 current->last_siginfo = NULL;
1938
1939
1940 current->jobctl &= ~JOBCTL_LISTENING;
1941
1942
1943
1944
1945
1946
1947 recalc_sigpending_tsk(current);
1948}
1949
1950static void ptrace_do_notify(int signr, int exit_code, int why)
1951{
1952 siginfo_t info;
1953
1954 memset(&info, 0, sizeof info);
1955 info.si_signo = signr;
1956 info.si_code = exit_code;
1957 info.si_pid = task_pid_vnr(current);
1958 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
1959
1960
1961 ptrace_stop(exit_code, why, 1, &info);
1962}
1963
1964void ptrace_notify(int exit_code)
1965{
1966 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1967 if (unlikely(current->task_works))
1968 task_work_run();
1969
	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
1973}
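
/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it.  If already set, participate in the existing
 * group stop.  If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself.  Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */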
1997static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
1999{
2000 struct signal_struct *sig = current->signal;
2001
2002 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2003 unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2004 struct task_struct *t;
2005
2006
2007 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2008
2009 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2010 unlikely(signal_group_exit(sig)))
2011 return false;
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023
2024
2025
2026
2027
2028
2029
2030
2031 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2032 sig->group_exit_code = signr;
2033
2034 sig->group_stop_count = 0;
2035
2036 if (task_set_jobctl_pending(current, signr | gstop))
2037 sig->group_stop_count++;
2038
2039 t = current;
2040 while_each_thread(current, t) {
2041
2042
2043
2044
2045
2046 if (!task_is_stopped(t) &&
2047 task_set_jobctl_pending(t, signr | gstop)) {
2048 sig->group_stop_count++;
2049 if (likely(!(t->ptrace & PT_SEIZED)))
2050 signal_wake_up(t, 0);
2051 else
2052 ptrace_trap_notify(t);
2053 }
2054 }
2055 }
2056
2057 if (likely(!current->ptrace)) {
2058 int notify = 0;
2059
2060
2061
2062
2063
2064
2065 if (task_participate_group_stop(current))
2066 notify = CLD_STOPPED;
2067
2068 __set_current_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);
2070
2071
2072
2073
2074
2075
2076
2077
2078
2079
2080 if (notify) {
2081 read_lock(&tasklist_lock);
2082 do_notify_parent_cldstop(current, false, notify);
2083 read_unlock(&tasklist_lock);
2084 }
2085
2086
2087 freezable_schedule();
2088 return true;
2089 } else {
2090
2091
2092
2093
2094 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2095 return false;
2096 }
2097}
2098
2099
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111
2112
2113
2114static void do_jobctl_trap(void)
2115{
2116 struct signal_struct *signal = current->signal;
2117 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2118
2119 if (current->ptrace & PT_SEIZED) {
2120 if (!signal->group_stop_count &&
2121 !(signal->flags & SIGNAL_STOP_STOPPED))
2122 signr = SIGTRAP;
2123 WARN_ON_ONCE(!signr);
2124 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2125 CLD_STOPPED);
2126 } else {
2127 WARN_ON_ONCE(!signr);
2128 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2129 current->exit_code = 0;
2130 }
2131}
2132
2133static int ptrace_signal(int signr, siginfo_t *info)
2134{
2135 ptrace_signal_deliver();
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2146 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2147
2148
2149 signr = current->exit_code;
2150 if (signr == 0)
2151 return signr;
2152
2153 current->exit_code = 0;
2154
2155
2156
2157
2158
2159
2160
2161 if (signr != info->si_signo) {
2162 info->si_signo = signr;
2163 info->si_errno = 0;
2164 info->si_code = SI_USER;
2165 rcu_read_lock();
2166 info->si_pid = task_pid_vnr(current->parent);
2167 info->si_uid = from_kuid_munged(current_user_ns(),
2168 task_uid(current->parent));
2169 rcu_read_unlock();
2170 }
2171
2172
	if (sigismember(&current->blocked, signr)) {
2174 specific_send_sig_info(signr, info, current);
2175 signr = 0;
2176 }
2177
2178 return signr;
2179}
2180
2181int get_signal(struct ksignal *ksig)
2182{
2183 struct sighand_struct *sighand = current->sighand;
2184 struct signal_struct *signal = current->signal;
2185 int signr;
2186
2187 if (unlikely(current->task_works))
2188 task_work_run();
2189
2190 if (unlikely(uprobe_deny_signal()))
2191 return 0;
2192
2193
2194
2195
2196
2197
2198 try_to_freeze();
2199
2200relock:
2201 spin_lock_irq(&sighand->siglock);
2202
2203
2204
2205
2206
2207 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2208 int why;
2209
2210 if (signal->flags & SIGNAL_CLD_CONTINUED)
2211 why = CLD_CONTINUED;
2212 else
2213 why = CLD_STOPPED;
2214
2215 signal->flags &= ~SIGNAL_CLD_MASK;
2216
2217 spin_unlock_irq(&sighand->siglock);
2218
2219
2220
2221
2222
2223
2224
2225
2226
2227 read_lock(&tasklist_lock);
2228 do_notify_parent_cldstop(current, false, why);
2229
2230 if (ptrace_reparented(current->group_leader))
2231 do_notify_parent_cldstop(current->group_leader,
2232 true, why);
2233 read_unlock(&tasklist_lock);
2234
2235 goto relock;
2236 }
2237
2238 for (;;) {
2239 struct k_sigaction *ka;
2240
2241 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2242 do_signal_stop(0))
2243 goto relock;
2244
2245 if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
2246 do_jobctl_trap();
2247 spin_unlock_irq(&sighand->siglock);
2248 goto relock;
2249 }
2250
		signr = dequeue_signal(current, &current->blocked, &ksig->info);
2252
2253 if (!signr)
2254 break;
2255
2256 if (unlikely(current->ptrace) && signr != SIGKILL) {
2257 signr = ptrace_signal(signr, &ksig->info);
2258 if (!signr)
2259 continue;
2260 }
2261
2262 ka = &sighand->action[signr-1];
2263
2264
2265 trace_signal_deliver(signr, &ksig->info, ka);
2266
2267 if (ka->sa.sa_handler == SIG_IGN)
2268 continue;
2269 if (ka->sa.sa_handler != SIG_DFL) {
2270
2271 ksig->ka = *ka;
2272
2273 if (ka->sa.sa_flags & SA_ONESHOT)
2274 ka->sa.sa_handler = SIG_DFL;
2275
2276 break;
2277 }
2278
2279
2280
2281
2282 if (sig_kernel_ignore(signr))
2283 continue;
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293
2294
2295 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2296 !sig_kernel_only(signr))
2297 continue;
2298
2299 if (sig_kernel_stop(signr)) {
2300
2301
2302
2303
2304
2305
2306
2307
2308
2309
2310 if (signr != SIGSTOP) {
2311 spin_unlock_irq(&sighand->siglock);
2312
2313
2314
2315 if (is_current_pgrp_orphaned())
2316 goto relock;
2317
2318 spin_lock_irq(&sighand->siglock);
2319 }
2320
2321 if (likely(do_signal_stop(ksig->info.si_signo))) {
2322
2323 goto relock;
2324 }
2325
2326
2327
2328
2329
2330 continue;
2331 }
2332
2333 spin_unlock_irq(&sighand->siglock);
2334
2335
2336
2337
2338 current->flags |= PF_SIGNALED;
2339
2340 if (sig_kernel_coredump(signr)) {
2341 if (print_fatal_signals)
2342 print_fatal_signal(ksig->info.si_signo);
2343 proc_coredump_connector(current);
2344
2345
2346
2347
2348
2349
2350
2351
2352 do_coredump(&ksig->info);
2353 }
2354
2355
2356
2357
2358 do_group_exit(ksig->info.si_signo);
2359
2360 }
2361 spin_unlock_irq(&sighand->siglock);
2362
2363 ksig->sig = signr;
2364 return ksig->sig > 0;
2365}
2366
2367
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377static void signal_delivered(struct ksignal *ksig, int stepping)
2378{
2379 sigset_t blocked;
2380
2381
2382
2383
2384
2385 clear_restore_sigmask();
2386
	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2388 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2389 sigaddset(&blocked, ksig->sig);
2390 set_current_blocked(&blocked);
2391 tracehook_signal_handler(stepping);
2392}
2393
2394void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2395{
2396 if (failed)
2397 force_sigsegv(ksig->sig, current);
2398 else
2399 signal_delivered(ksig, stepping);
2400}
2401
2402
2403
2404
2405
2406
2407static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2408{
2409 sigset_t retarget;
2410 struct task_struct *t;
2411
2412 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2413 if (sigisemptyset(&retarget))
2414 return;
2415
2416 t = tsk;
2417 while_each_thread(tsk, t) {
2418 if (t->flags & PF_EXITING)
2419 continue;
2420
2421 if (!has_pending_signals(&retarget, &t->blocked))
2422 continue;
2423
2424 sigandsets(&retarget, &retarget, &t->blocked);
2425
2426 if (!signal_pending(t))
2427 signal_wake_up(t, 0);
2428
2429 if (sigisemptyset(&retarget))
2430 break;
2431 }
2432}
2433
2434void exit_signals(struct task_struct *tsk)
2435{
2436 int group_stop = 0;
2437 sigset_t unblocked;
2438
2439
2440
2441
2442
2443 threadgroup_change_begin(tsk);
2444
2445 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2446 tsk->flags |= PF_EXITING;
2447 threadgroup_change_end(tsk);
2448 return;
2449 }
2450
2451 spin_lock_irq(&tsk->sighand->siglock);
2452
2453
2454
2455
2456 tsk->flags |= PF_EXITING;
2457
2458 threadgroup_change_end(tsk);
2459
2460 if (!signal_pending(tsk))
2461 goto out;
2462
2463 unblocked = tsk->blocked;
2464 signotset(&unblocked);
2465 retarget_shared_pending(tsk, &unblocked);
2466
2467 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2468 task_participate_group_stop(tsk))
2469 group_stop = CLD_STOPPED;
2470out:
2471 spin_unlock_irq(&tsk->sighand->siglock);
2472
2473
2474
2475
2476
2477 if (unlikely(group_stop)) {
2478 read_lock(&tasklist_lock);
2479 do_notify_parent_cldstop(tsk, false, group_stop);
2480 read_unlock(&tasklist_lock);
2481 }
2482}
2483
2484EXPORT_SYMBOL(recalc_sigpending);
2485EXPORT_SYMBOL_GPL(dequeue_signal);
2486EXPORT_SYMBOL(flush_signals);
2487EXPORT_SYMBOL(force_sig);
2488EXPORT_SYMBOL(send_sig);
2489EXPORT_SYMBOL(send_sig_info);
2490EXPORT_SYMBOL(sigprocmask);
2491EXPORT_SYMBOL(block_all_signals);
2492EXPORT_SYMBOL(unblock_all_signals);
2493
2494
2495
2496
2497
2498
2499
2500
2501
2502SYSCALL_DEFINE0(restart_syscall)
2503{
	struct restart_block *restart = &current->restart_block;
2505 return restart->fn(restart);
2506}
2507
2508long do_no_restart_syscall(struct restart_block *param)
2509{
2510 return -EINTR;
2511}
2512
2513static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2514{
2515 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2516 sigset_t newblocked;
2517
		sigandnsets(&newblocked, newset, &current->blocked);
2519 retarget_shared_pending(tsk, &newblocked);
2520 }
2521 tsk->blocked = *newset;
2522 recalc_sigpending();
2523}
2524
2525
2526
2527
2528
2529
2530
2531
2532void set_current_blocked(sigset_t *newset)
2533{
2534 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2535 __set_current_blocked(newset);
2536}
2537
2538void __set_current_blocked(const sigset_t *newset)
2539{
2540 struct task_struct *tsk = current;
2541
2542 spin_lock_irq(&tsk->sighand->siglock);
2543 __set_task_blocked(tsk, newset);
2544 spin_unlock_irq(&tsk->sighand->siglock);
2545}
2546
2547
2548
2549
2550
2551
2552
2553
2554
2555int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2556{
2557 struct task_struct *tsk = current;
2558 sigset_t newset;
2559
2560
2561 if (oldset)
2562 *oldset = tsk->blocked;
2563
2564 switch (how) {
2565 case SIG_BLOCK:
2566 sigorsets(&newset, &tsk->blocked, set);
2567 break;
2568 case SIG_UNBLOCK:
2569 sigandnsets(&newset, &tsk->blocked, set);
2570 break;
2571 case SIG_SETMASK:
2572 newset = *set;
2573 break;
2574 default:
2575 return -EINVAL;
2576 }
2577
2578 __set_current_blocked(&newset);
2579 return 0;
2580}
2581
2582
2583
2584
2585
2586
2587
2588
2589SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2590 sigset_t __user *, oset, size_t, sigsetsize)
2591{
2592 sigset_t old_set, new_set;
2593 int error;
2594
2595
2596 if (sigsetsize != sizeof(sigset_t))
2597 return -EINVAL;
2598
2599 old_set = current->blocked;
2600
2601 if (nset) {
2602 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2603 return -EFAULT;
2604 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2605
2606 error = sigprocmask(how, &new_set, NULL);
2607 if (error)
2608 return error;
2609 }
2610
2611 if (oset) {
2612 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2613 return -EFAULT;
2614 }
2615
2616 return 0;
2617}
2618
2619#ifdef CONFIG_COMPAT
2620COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
2621 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
2622{
2623#ifdef __BIG_ENDIAN
2624 sigset_t old_set = current->blocked;
2625
2626
2627 if (sigsetsize != sizeof(sigset_t))
2628 return -EINVAL;
2629
2630 if (nset) {
2631 compat_sigset_t new32;
2632 sigset_t new_set;
2633 int error;
2634 if (copy_from_user(&new32, nset, sizeof(compat_sigset_t)))
2635 return -EFAULT;
2636
2637 sigset_from_compat(&new_set, &new32);
2638 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2639
2640 error = sigprocmask(how, &new_set, NULL);
2641 if (error)
2642 return error;
2643 }
2644 if (oset) {
2645 compat_sigset_t old32;
2646 sigset_to_compat(&old32, &old_set);
2647 if (copy_to_user(oset, &old32, sizeof(compat_sigset_t)))
2648 return -EFAULT;
2649 }
2650 return 0;
2651#else
2652 return sys_rt_sigprocmask(how, (sigset_t __user *)nset,
2653 (sigset_t __user *)oset, sigsetsize);
2654#endif
2655}
2656#endif
2657
2658static int do_sigpending(void *set, unsigned long sigsetsize)
2659{
2660 if (sigsetsize > sizeof(sigset_t))
2661 return -EINVAL;
2662
	spin_lock_irq(&current->sighand->siglock);
	sigorsets(set, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);
2667
2668
2669 sigandsets(set, ¤t->blocked, set);
2670 return 0;
2671}
2672
2673
2674
2675
2676
2677
2678
2679SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
2680{
2681 sigset_t set;
2682 int err = do_sigpending(&set, sigsetsize);
2683 if (!err && copy_to_user(uset, &set, sigsetsize))
2684 err = -EFAULT;
2685 return err;
2686}
2687
2688#ifdef CONFIG_COMPAT
2689COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
2690 compat_size_t, sigsetsize)
2691{
2692#ifdef __BIG_ENDIAN
2693 sigset_t set;
2694 int err = do_sigpending(&set, sigsetsize);
2695 if (!err) {
2696 compat_sigset_t set32;
2697 sigset_to_compat(&set32, &set);
2698
2699 if (copy_to_user(uset, &set32, sigsetsize))
2700 err = -EFAULT;
2701 }
2702 return err;
2703#else
2704 return sys_rt_sigpending((sigset_t __user *)uset, sigsetsize);
2705#endif
2706}
2707#endif
2708
2709#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2710
2711int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
2712{
2713 int err;
2714
2715 if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2716 return -EFAULT;
2717 if (from->si_code < 0)
2718 return __copy_to_user(to, from, sizeof(siginfo_t))
2719 ? -EFAULT : 0;
2720
2721
2722
2723
2724
2725
2726
2727
2728
2729 err = __put_user(from->si_signo, &to->si_signo);
2730 err |= __put_user(from->si_errno, &to->si_errno);
2731 err |= __put_user((short)from->si_code, &to->si_code);
2732 switch (from->si_code & __SI_MASK) {
2733 case __SI_KILL:
2734 err |= __put_user(from->si_pid, &to->si_pid);
2735 err |= __put_user(from->si_uid, &to->si_uid);
2736 break;
2737 case __SI_TIMER:
2738 err |= __put_user(from->si_tid, &to->si_tid);
2739 err |= __put_user(from->si_overrun, &to->si_overrun);
2740 err |= __put_user(from->si_ptr, &to->si_ptr);
2741 break;
2742 case __SI_POLL:
2743 err |= __put_user(from->si_band, &to->si_band);
2744 err |= __put_user(from->si_fd, &to->si_fd);
2745 break;
2746 case __SI_FAULT:
2747 err |= __put_user(from->si_addr, &to->si_addr);
2748#ifdef __ARCH_SI_TRAPNO
2749 err |= __put_user(from->si_trapno, &to->si_trapno);
2750#endif
2751#ifdef BUS_MCEERR_AO
2752
2753
2754
2755
2756 if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
2757 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2758#endif
2759#ifdef SEGV_BNDERR
2760 err |= __put_user(from->si_lower, &to->si_lower);
2761 err |= __put_user(from->si_upper, &to->si_upper);
2762#endif
2763 break;
2764 case __SI_CHLD:
2765 err |= __put_user(from->si_pid, &to->si_pid);
2766 err |= __put_user(from->si_uid, &to->si_uid);
2767 err |= __put_user(from->si_status, &to->si_status);
2768 err |= __put_user(from->si_utime, &to->si_utime);
2769 err |= __put_user(from->si_stime, &to->si_stime);
2770 break;
2771 case __SI_RT:
2772 case __SI_MESGQ:
2773 err |= __put_user(from->si_pid, &to->si_pid);
2774 err |= __put_user(from->si_uid, &to->si_uid);
2775 err |= __put_user(from->si_ptr, &to->si_ptr);
2776 break;
2777#ifdef __ARCH_SIGSYS
2778 case __SI_SYS:
2779 err |= __put_user(from->si_call_addr, &to->si_call_addr);
2780 err |= __put_user(from->si_syscall, &to->si_syscall);
2781 err |= __put_user(from->si_arch, &to->si_arch);
2782 break;
2783#endif
2784 default:
2785 err |= __put_user(from->si_pid, &to->si_pid);
2786 err |= __put_user(from->si_uid, &to->si_uid);
2787 break;
2788 }
2789 return err;
2790}
2791
2792#endif
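
/**
 * do_sigtimedwait - wait for queued signals specified in @which
 * @which: queued signals to wait for
 * @info: if non-null, the signal's siginfo is returned here
 * @ts: upper bound on process time suspension
 */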
2800int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
2801 const struct timespec *ts)
2802{
2803 struct task_struct *tsk = current;
2804 long timeout = MAX_SCHEDULE_TIMEOUT;
2805 sigset_t mask = *which;
2806 int sig;
2807
2808 if (ts) {
2809 if (!timespec_valid(ts))
2810 return -EINVAL;
2811 timeout = timespec_to_jiffies(ts);
2812
2813
2814
2815
2816 if (ts->tv_sec || ts->tv_nsec)
2817 timeout++;
2818 }
2819
2820
2821
2822
2823 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
2824 signotset(&mask);
2825
2826 spin_lock_irq(&tsk->sighand->siglock);
2827 sig = dequeue_signal(tsk, &mask, info);
2828 if (!sig && timeout) {
2829
2830
2831
2832
2833
2834
2835 tsk->real_blocked = tsk->blocked;
2836 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
2837 recalc_sigpending();
2838 spin_unlock_irq(&tsk->sighand->siglock);
2839
2840 timeout = freezable_schedule_timeout_interruptible(timeout);
2841
2842 spin_lock_irq(&tsk->sighand->siglock);
2843 __set_task_blocked(tsk, &tsk->real_blocked);
2844 sigemptyset(&tsk->real_blocked);
2845 sig = dequeue_signal(tsk, &mask, info);
2846 }
2847 spin_unlock_irq(&tsk->sighand->siglock);
2848
2849 if (sig)
2850 return sig;
2851 return timeout ? -EINTR : -EAGAIN;
2852}
2853
2854
2855
2856
2857
2858
2859
2860
2861
2862SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2863 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2864 size_t, sigsetsize)
2865{
2866 sigset_t these;
2867 struct timespec ts;
2868 siginfo_t info;
2869 int ret;
2870
2871
2872 if (sigsetsize != sizeof(sigset_t))
2873 return -EINVAL;
2874
2875 if (copy_from_user(&these, uthese, sizeof(these)))
2876 return -EFAULT;
2877
2878 if (uts) {
2879 if (copy_from_user(&ts, uts, sizeof(ts)))
2880 return -EFAULT;
2881 }
2882
2883 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
2884
2885 if (ret > 0 && uinfo) {
2886 if (copy_siginfo_to_user(uinfo, &info))
2887 ret = -EFAULT;
2888 }
2889
2890 return ret;
2891}
2892
2893
2894
2895
2896
2897
2898SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2899{
2900 struct siginfo info;
2901
2902 info.si_signo = sig;
2903 info.si_errno = 0;
2904 info.si_code = SI_USER;
2905 info.si_pid = task_tgid_vnr(current);
2906 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2907
2908 return kill_something_info(sig, &info, pid);
2909}
2910
2911static int
2912do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2913{
2914 struct task_struct *p;
2915 int error = -ESRCH;
2916
2917 rcu_read_lock();
2918 p = find_task_by_vpid(pid);
2919 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2920 error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permission and process existence
		 * probe.  No signal is actually delivered.
		 */
2925 if (!error && sig) {
2926 error = do_send_sig_info(sig, info, p, false);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal.  The window is
			 * tiny, and the signal is private anyway.
			 */
2932 if (unlikely(error == -ESRCH))
2933 error = 0;
2934 }
2935 }
2936 rcu_read_unlock();
2937
2938 return error;
2939}
2940
2941static int do_tkill(pid_t tgid, pid_t pid, int sig)
2942{
2943 struct siginfo info = {};
2944
2945 info.si_signo = sig;
2946 info.si_errno = 0;
2947 info.si_code = SI_TKILL;
2948 info.si_pid = task_tgid_vnr(current);
2949 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2950
2951 return do_send_specific(tgid, pid, sig, &info);
2952}
2953
/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks @tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target thread group.  This solves
 *  the problem of threads exiting and their PIDs getting reused.
 */
2964SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
2965{
2966
2967 if (pid <= 0 || tgid <= 0)
2968 return -EINVAL;
2969
2970 return do_tkill(tgid, pid, sig);
2971}
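
/*
 * Illustrative userspace sketch (not part of this file): directing a signal
 * at one specific thread through the raw syscall, since older C libraries
 * ship no tgkill() wrapper.  Passing both the tgid and the tid keeps the
 * call immune to tid reuse, as described above.
 *
 *	#define _GNU_SOURCE
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static long signal_this_thread(int sig)
 *	{
 *		pid_t tgid = getpid();
 *		pid_t tid = syscall(SYS_gettid);
 *
 *		return syscall(SYS_tgkill, tgid, tid, sig);
 *	}
 */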
2972
2973
/**
 *  sys_tkill - send signal to one specific task
 *  @pid: the PID of the task
 *  @sig: signal to be sent
 *
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
2980SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
2981{
2982
2983 if (pid <= 0)
2984 return -EINVAL;
2985
2986 return do_tkill(0, pid, sig);
2987}
2988
2989static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
2990{
	/*
	 * Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
2994 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
2995 (task_pid_vnr(current) != pid)) {
2996
2997 WARN_ON_ONCE(info->si_code < 0);
2998 return -EPERM;
2999 }
3000 info->si_signo = sig;

	/* POSIX.1b doesn't mention process groups. */
3003 return kill_proc_info(sig, info, pid);
3004}
3005
3006
/**
 *  sys_rt_sigqueueinfo - send signal information to a process
 *  @pid: the PID of the process
 *  @sig: signal to be sent
 *  @uinfo: signal info to be sent
 */
3012SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3013 siginfo_t __user *, uinfo)
3014{
3015 siginfo_t info;
3016 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3017 return -EFAULT;
3018 return do_rt_sigqueueinfo(pid, sig, &info);
3019}
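
/*
 * Illustrative userspace sketch (not part of this file): the POSIX
 * sigqueue() wrapper is the usual route into rt_sigqueueinfo; it fills in
 * si_code = SI_QUEUE and carries one word of payload in si_value.  The
 * payload value and helper name are arbitrary examples.
 *
 *	#include <signal.h>
 *
 *	static int notify(pid_t pid)
 *	{
 *		union sigval value = { .sival_int = 42 };
 *
 *		return sigqueue(pid, SIGRTMIN, value);
 *	}
 *
 * A receiver whose handler was installed with SA_SIGINFO sees the payload
 * in info->si_value.
 */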
3020
3021#ifdef CONFIG_COMPAT
3022COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3023 compat_pid_t, pid,
3024 int, sig,
3025 struct compat_siginfo __user *, uinfo)
3026{
3027 siginfo_t info;
3028 int ret = copy_siginfo_from_user32(&info, uinfo);
3029 if (unlikely(ret))
3030 return ret;
3031 return do_rt_sigqueueinfo(pid, sig, &info);
3032}
3033#endif
3034
3035static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
3036{
3037
3038 if (pid <= 0 || tgid <= 0)
3039 return -EINVAL;
3040
	/*
	 * Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
3044 if (((info->si_code >= 0 || info->si_code == SI_TKILL)) &&
3045 (task_pid_vnr(current) != pid)) {
3046
3047 WARN_ON_ONCE(info->si_code < 0);
3048 return -EPERM;
3049 }
3050 info->si_signo = sig;
3051
3052 return do_send_specific(tgid, pid, sig, info);
3053}
3054
3055SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3056 siginfo_t __user *, uinfo)
3057{
3058 siginfo_t info;
3059
3060 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3061 return -EFAULT;
3062
3063 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3064}
3065
3066#ifdef CONFIG_COMPAT
3067COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3068 compat_pid_t, tgid,
3069 compat_pid_t, pid,
3070 int, sig,
3071 struct compat_siginfo __user *, uinfo)
3072{
3073 siginfo_t info;
3074
3075 if (copy_siginfo_from_user32(&info, uinfo))
3076 return -EFAULT;
3077 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3078}
3079#endif
3080
/*
 * For kernel threads only; must not be used if the task was cloned with
 * CLONE_SIGHAND.
 */
3084void kernel_sigaction(int sig, __sighandler_t action)
3085{
	spin_lock_irq(&current->sighand->siglock);
3087 current->sighand->action[sig - 1].sa.sa_handler = action;
3088 if (action == SIG_IGN) {
3089 sigset_t mask;
3090
3091 sigemptyset(&mask);
3092 sigaddset(&mask, sig);
3093
		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
		flush_sigqueue_mask(&mask, &current->pending);
3096 recalc_sigpending();
3097 }
	spin_unlock_irq(&current->sighand->siglock);
3099}
3100EXPORT_SYMBOL(kernel_sigaction);
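
/*
 * Illustrative in-kernel sketch (not part of this file): a kernel thread
 * that wants to see SIGTERM normally goes through the allow_signal()
 * helper, which ends up in kernel_sigaction() above.  The thread function
 * name and loop body are arbitrary examples.
 *
 *	static int my_kthread(void *unused)
 *	{
 *		allow_signal(SIGTERM);
 *
 *		while (!kthread_should_stop()) {
 *			set_current_state(TASK_INTERRUPTIBLE);
 *			schedule();
 *			if (signal_pending(current))
 *				break;
 *		}
 *		return 0;
 *	}
 */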
3101
3102int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3103{
3104 struct task_struct *p = current, *t;
3105 struct k_sigaction *k;
3106 sigset_t mask;
3107
3108 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3109 return -EINVAL;
3110
3111 k = &p->sighand->action[sig-1];
3112
3113 spin_lock_irq(&p->sighand->siglock);
3114 if (oact)
3115 *oact = *k;
3116
3117 if (act) {
3118 sigdelsetmask(&act->sa.sa_mask,
3119 sigmask(SIGKILL) | sigmask(SIGSTOP));
3120 *k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
3132 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3133 sigemptyset(&mask);
3134 sigaddset(&mask, sig);
3135 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
3136 for_each_thread(p, t)
3137 flush_sigqueue_mask(&mask, &t->pending);
3138 }
3139 }
3140
3141 spin_unlock_irq(&p->sighand->siglock);
3142 return 0;
3143}
3144
3145static int
3146do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
3147{
3148 stack_t oss;
3149 int error;
3150
3151 oss.ss_sp = (void __user *) current->sas_ss_sp;
3152 oss.ss_size = current->sas_ss_size;
3153 oss.ss_flags = sas_ss_flags(sp);
3154
3155 if (uss) {
3156 void __user *ss_sp;
3157 size_t ss_size;
3158 int ss_flags;
3159
3160 error = -EFAULT;
3161 if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
3162 goto out;
3163 error = __get_user(ss_sp, &uss->ss_sp) |
3164 __get_user(ss_flags, &uss->ss_flags) |
3165 __get_user(ss_size, &uss->ss_size);
3166 if (error)
3167 goto out;
3168
3169 error = -EPERM;
3170 if (on_sig_stack(sp))
3171 goto out;
3172
3173 error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly: old
		 * code may have been written using ss_flags == 0 to mean
		 * ss_flags == SS_ONSTACK (as this was the only way that
		 * worked), so accepting 0 here preserves that older
		 * mechanism.
		 */
3181 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
3182 goto out;
3183
3184 if (ss_flags == SS_DISABLE) {
3185 ss_size = 0;
3186 ss_sp = NULL;
3187 } else {
3188 error = -ENOMEM;
3189 if (ss_size < MINSIGSTKSZ)
3190 goto out;
3191 }
3192
3193 current->sas_ss_sp = (unsigned long) ss_sp;
3194 current->sas_ss_size = ss_size;
3195 }
3196
3197 error = 0;
3198 if (uoss) {
3199 error = -EFAULT;
3200 if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
3201 goto out;
3202 error = __put_user(oss.ss_sp, &uoss->ss_sp) |
3203 __put_user(oss.ss_size, &uoss->ss_size) |
3204 __put_user(oss.ss_flags, &uoss->ss_flags);
3205 }
3206
3207out:
3208 return error;
3209}
3210SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
3211{
3212 return do_sigaltstack(uss, uoss, current_user_stack_pointer());
3213}
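
/*
 * Illustrative userspace sketch (not part of this file): pairing
 * sigaltstack() with an SA_ONSTACK handler so a SIGSEGV handler can still
 * run after the normal stack has overflowed.  SIGSEGV and the helper name
 * are example choices; the handler itself is supplied by the caller.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	static int use_altstack(void (*handler)(int, siginfo_t *, void *))
 *	{
 *		stack_t ss = {
 *			.ss_sp = malloc(SIGSTKSZ),
 *			.ss_size = SIGSTKSZ,
 *			.ss_flags = 0,
 *		};
 *		struct sigaction sa = {
 *			.sa_sigaction = handler,
 *			.sa_flags = SA_SIGINFO | SA_ONSTACK,
 *		};
 *
 *		if (!ss.ss_sp || sigaltstack(&ss, NULL) < 0)
 *			return -1;
 *		sigemptyset(&sa.sa_mask);
 *		return sigaction(SIGSEGV, &sa, NULL);
 *	}
 */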
3214
3215int restore_altstack(const stack_t __user *uss)
3216{
3217 int err = do_sigaltstack(uss, NULL, current_user_stack_pointer());
3218
3219 return err == -EFAULT ? err : 0;
3220}
3221
3222int __save_altstack(stack_t __user *uss, unsigned long sp)
3223{
3224 struct task_struct *t = current;
3225 return __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
3226 __put_user(sas_ss_flags(sp), &uss->ss_flags) |
3227 __put_user(t->sas_ss_size, &uss->ss_size);
3228}
3229
3230#ifdef CONFIG_COMPAT
3231COMPAT_SYSCALL_DEFINE2(sigaltstack,
3232 const compat_stack_t __user *, uss_ptr,
3233 compat_stack_t __user *, uoss_ptr)
3234{
3235 stack_t uss, uoss;
3236 int ret;
3237 mm_segment_t seg;
3238
3239 if (uss_ptr) {
3240 compat_stack_t uss32;
3241
3242 memset(&uss, 0, sizeof(stack_t));
3243 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
3244 return -EFAULT;
3245 uss.ss_sp = compat_ptr(uss32.ss_sp);
3246 uss.ss_flags = uss32.ss_flags;
3247 uss.ss_size = uss32.ss_size;
3248 }
3249 seg = get_fs();
3250 set_fs(KERNEL_DS);
3251 ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
3252 (stack_t __force __user *) &uoss,
3253 compat_user_stack_pointer());
3254 set_fs(seg);
3255 if (ret >= 0 && uoss_ptr) {
3256 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(compat_stack_t)) ||
3257 __put_user(ptr_to_compat(uoss.ss_sp), &uoss_ptr->ss_sp) ||
3258 __put_user(uoss.ss_flags, &uoss_ptr->ss_flags) ||
3259 __put_user(uoss.ss_size, &uoss_ptr->ss_size))
3260 ret = -EFAULT;
3261 }
3262 return ret;
3263}
3264
3265int compat_restore_altstack(const compat_stack_t __user *uss)
3266{
3267 int err = compat_sys_sigaltstack(uss, NULL);
3268
3269 return err == -EFAULT ? err : 0;
3270}
3271
3272int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
3273{
3274 struct task_struct *t = current;
3275 return __put_user(ptr_to_compat((void __user *)t->sas_ss_sp), &uss->ss_sp) |
3276 __put_user(sas_ss_flags(sp), &uss->ss_flags) |
3277 __put_user(t->sas_ss_size, &uss->ss_size);
3278}
3279#endif
3280
3281#ifdef __ARCH_WANT_SYS_SIGPENDING
3282
/**
 *  sys_sigpending - examine pending signals
 *  @set: where the mask of pending signals is returned
 */
3287SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
3288{
3289 return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t));
3290}
3291
3292#endif
3293
3294#ifdef __ARCH_WANT_SYS_SIGPROCMASK
3295
/**
 *  sys_sigprocmask - examine and change blocked signals
 *  @how: whether to add, remove, or set signals
 *  @nset: signals to add or remove (if non-null)
 *  @oset: previous value of signal mask if non-null
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */
3305SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
3306 old_sigset_t __user *, oset)
3307{
3308 old_sigset_t old_set, new_set;
3309 sigset_t new_blocked;
3310
3311 old_set = current->blocked.sig[0];
3312
3313 if (nset) {
3314 if (copy_from_user(&new_set, nset, sizeof(*nset)))
3315 return -EFAULT;
3316
3317 new_blocked = current->blocked;
3318
3319 switch (how) {
3320 case SIG_BLOCK:
3321 sigaddsetmask(&new_blocked, new_set);
3322 break;
3323 case SIG_UNBLOCK:
3324 sigdelsetmask(&new_blocked, new_set);
3325 break;
3326 case SIG_SETMASK:
3327 new_blocked.sig[0] = new_set;
3328 break;
3329 default:
3330 return -EINVAL;
3331 }
3332
3333 set_current_blocked(&new_blocked);
3334 }
3335
3336 if (oset) {
3337 if (copy_to_user(oset, &old_set, sizeof(*oset)))
3338 return -EFAULT;
3339 }
3340
3341 return 0;
3342}
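
/*
 * Illustrative userspace sketch (not part of this file): modern C libraries
 * use rt_sigprocmask rather than this legacy entry point, but the
 * SIG_BLOCK/SIG_UNBLOCK/SIG_SETMASK semantics are those of the switch
 * above.  SIGINT and the helper name are arbitrary examples.
 *
 *	#include <signal.h>
 *
 *	static void critical_section(void (*work)(void))
 *	{
 *		sigset_t block, old;
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGINT);
 *		sigprocmask(SIG_BLOCK, &block, &old);	// defer SIGINT
 *		work();
 *		sigprocmask(SIG_SETMASK, &old, NULL);	// restore old mask
 *	}
 */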
3343#endif
3344
3345#ifndef CONFIG_ODD_RT_SIGACTION
3346
/**
 *  sys_rt_sigaction - alter an action taken by a process
 *  @sig: signal to be sent
 *  @act: new sigaction
 *  @oact: used to save the previous sigaction
 *  @sigsetsize: size of sigset_t type
 */
3353SYSCALL_DEFINE4(rt_sigaction, int, sig,
3354 const struct sigaction __user *, act,
3355 struct sigaction __user *, oact,
3356 size_t, sigsetsize)
3357{
3358 struct k_sigaction new_sa, old_sa;
3359 int ret = -EINVAL;
3360
3361
3362 if (sigsetsize != sizeof(sigset_t))
3363 goto out;
3364
3365 if (act) {
3366 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3367 return -EFAULT;
3368 }
3369
3370 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3371
3372 if (!ret && oact) {
3373 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3374 return -EFAULT;
3375 }
3376out:
3377 return ret;
3378}
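
/*
 * Illustrative userspace sketch (not part of this file): installing a
 * SA_SIGINFO handler through the glibc sigaction() wrapper, which reaches
 * the rt_sigaction entry point above.  SIGTERM and the function names are
 * arbitrary examples; real handlers must stay async-signal-safe.
 *
 *	#include <signal.h>
 *
 *	static volatile sig_atomic_t got_term;
 *
 *	static void on_term(int sig, siginfo_t *info, void *ucontext)
 *	{
 *		got_term = 1;
 *	}
 *
 *	static int install(void)
 *	{
 *		struct sigaction sa = {
 *			.sa_sigaction = on_term,
 *			.sa_flags = SA_SIGINFO | SA_RESTART,
 *		};
 *
 *		sigemptyset(&sa.sa_mask);
 *		return sigaction(SIGTERM, &sa, NULL);
 *	}
 */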
3379#ifdef CONFIG_COMPAT
3380COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
3381 const struct compat_sigaction __user *, act,
3382 struct compat_sigaction __user *, oact,
3383 compat_size_t, sigsetsize)
3384{
3385 struct k_sigaction new_ka, old_ka;
3386 compat_sigset_t mask;
3387#ifdef __ARCH_HAS_SA_RESTORER
3388 compat_uptr_t restorer;
3389#endif
3390 int ret;
3391
3392
3393 if (sigsetsize != sizeof(compat_sigset_t))
3394 return -EINVAL;
3395
3396 if (act) {
3397 compat_uptr_t handler;
3398 ret = get_user(handler, &act->sa_handler);
3399 new_ka.sa.sa_handler = compat_ptr(handler);
3400#ifdef __ARCH_HAS_SA_RESTORER
3401 ret |= get_user(restorer, &act->sa_restorer);
3402 new_ka.sa.sa_restorer = compat_ptr(restorer);
3403#endif
3404 ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask));
3405 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
3406 if (ret)
3407 return -EFAULT;
3408 sigset_from_compat(&new_ka.sa.sa_mask, &mask);
3409 }
3410
3411 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3412 if (!ret && oact) {
3413 sigset_to_compat(&mask, &old_ka.sa.sa_mask);
3414 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
3415 &oact->sa_handler);
3416 ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask));
3417 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
3418#ifdef __ARCH_HAS_SA_RESTORER
3419 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3420 &oact->sa_restorer);
3421#endif
3422 }
3423 return ret;
3424}
3425#endif
3426#endif
3427
3428#ifdef CONFIG_OLD_SIGACTION
3429SYSCALL_DEFINE3(sigaction, int, sig,
3430 const struct old_sigaction __user *, act,
3431 struct old_sigaction __user *, oact)
3432{
3433 struct k_sigaction new_ka, old_ka;
3434 int ret;
3435
3436 if (act) {
3437 old_sigset_t mask;
3438 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3439 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
3440 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
3441 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3442 __get_user(mask, &act->sa_mask))
3443 return -EFAULT;
3444#ifdef __ARCH_HAS_KA_RESTORER
3445 new_ka.ka_restorer = NULL;
3446#endif
3447 siginitset(&new_ka.sa.sa_mask, mask);
3448 }
3449
3450 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3451
3452 if (!ret && oact) {
3453 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3454 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
3455 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
3456 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3457 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3458 return -EFAULT;
3459 }
3460
3461 return ret;
3462}
3463#endif
3464#ifdef CONFIG_COMPAT_OLD_SIGACTION
3465COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
3466 const struct compat_old_sigaction __user *, act,
3467 struct compat_old_sigaction __user *, oact)
3468{
3469 struct k_sigaction new_ka, old_ka;
3470 int ret;
3471 compat_old_sigset_t mask;
3472 compat_uptr_t handler, restorer;
3473
3474 if (act) {
3475 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3476 __get_user(handler, &act->sa_handler) ||
3477 __get_user(restorer, &act->sa_restorer) ||
3478 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3479 __get_user(mask, &act->sa_mask))
3480 return -EFAULT;
3481
3482#ifdef __ARCH_HAS_KA_RESTORER
3483 new_ka.ka_restorer = NULL;
3484#endif
3485 new_ka.sa.sa_handler = compat_ptr(handler);
3486 new_ka.sa.sa_restorer = compat_ptr(restorer);
3487 siginitset(&new_ka.sa.sa_mask, mask);
3488 }
3489
3490 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3491
3492 if (!ret && oact) {
3493 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3494 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
3495 &oact->sa_handler) ||
3496 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3497 &oact->sa_restorer) ||
3498 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3499 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3500 return -EFAULT;
3501 }
3502 return ret;
3503}
3504#endif
3505
3506#ifdef CONFIG_SGETMASK_SYSCALL

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
3511SYSCALL_DEFINE0(sgetmask)
3512{
3513
3514 return current->blocked.sig[0];
3515}
3516
3517SYSCALL_DEFINE1(ssetmask, int, newmask)
3518{
3519 int old = current->blocked.sig[0];
3520 sigset_t newset;
3521
3522 siginitset(&newset, newmask);
3523 set_current_blocked(&newset);
3524
3525 return old;
3526}
3527#endif
3528
3529#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
3533SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
3534{
3535 struct k_sigaction new_sa, old_sa;
3536 int ret;
3537
3538 new_sa.sa.sa_handler = handler;
3539 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
3540 sigemptyset(&new_sa.sa.sa_mask);
3541
3542 ret = do_sigaction(sig, &new_sa, &old_sa);
3543
3544 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3545}
3546#endif
3547
3548#ifdef __ARCH_WANT_SYS_PAUSE
3549
3550SYSCALL_DEFINE0(pause)
3551{
3552 while (!signal_pending(current)) {
3553 __set_current_state(TASK_INTERRUPTIBLE);
3554 schedule();
3555 }
3556 return -ERESTARTNOHAND;
3557}
3558
3559#endif
3560
3561int sigsuspend(sigset_t *set)
3562{
3563 current->saved_sigmask = current->blocked;
3564 set_current_blocked(set);
3565
3566 __set_current_state(TASK_INTERRUPTIBLE);
3567 schedule();
3568 set_restore_sigmask();
3569 return -ERESTARTNOHAND;
3570}
3571
3572
/**
 *  sys_rt_sigsuspend - replace the signal mask with @unewset and suspend
 *	until a signal is received
 *  @unewset: new signal mask value
 *  @sigsetsize: size of sigset_t type
 */
3578SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
3579{
3580 sigset_t newset;
3581
3582
3583 if (sigsetsize != sizeof(sigset_t))
3584 return -EINVAL;
3585
3586 if (copy_from_user(&newset, unewset, sizeof(newset)))
3587 return -EFAULT;
3588 return sigsuspend(&newset);
3589}
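
/*
 * Illustrative userspace sketch (not part of this file): the classic
 * race-free wait built on the glibc sigsuspend() wrapper, which atomically
 * swaps in the temporary mask and sleeps, just as sigsuspend() above does
 * via current->saved_sigmask.  SIGCHLD and the flag name are examples; a
 * SIGCHLD handler (not shown) sets child_exited.
 *
 *	#include <signal.h>
 *
 *	static volatile sig_atomic_t child_exited;
 *
 *	static void wait_for_child(void)
 *	{
 *		sigset_t block, old;
 *
 *		sigemptyset(&block);
 *		sigaddset(&block, SIGCHLD);
 *		sigprocmask(SIG_BLOCK, &block, &old);
 *
 *		while (!child_exited)
 *			sigsuspend(&old);	// SIGCHLD unblocked only while asleep
 *
 *		sigprocmask(SIG_SETMASK, &old, NULL);
 *	}
 */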
3590
3591#ifdef CONFIG_COMPAT
3592COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
3593{
3594#ifdef __BIG_ENDIAN
3595 sigset_t newset;
3596 compat_sigset_t newset32;
3597
3598
3599 if (sigsetsize != sizeof(sigset_t))
3600 return -EINVAL;
3601
3602 if (copy_from_user(&newset32, unewset, sizeof(compat_sigset_t)))
3603 return -EFAULT;
3604 sigset_from_compat(&newset, &newset32);
3605 return sigsuspend(&newset);
3606#else
3607
3608 return sys_rt_sigsuspend((sigset_t __user *)unewset, sigsetsize);
3609#endif
3610}
3611#endif
3612
3613#ifdef CONFIG_OLD_SIGSUSPEND
3614SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
3615{
3616 sigset_t blocked;
3617 siginitset(&blocked, mask);
3618 return sigsuspend(&blocked);
3619}
3620#endif
3621#ifdef CONFIG_OLD_SIGSUSPEND3
3622SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
3623{
3624 sigset_t blocked;
3625 siginitset(&blocked, mask);
3626 return sigsuspend(&blocked);
3627}
3628#endif
3629
3630__weak const char *arch_vma_name(struct vm_area_struct *vma)
3631{
3632 return NULL;
3633}
3634
3635void __init signals_init(void)
3636{
3637 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
3638}
3639
3640#ifdef CONFIG_KGDB_KDB
3641#include <linux/kdb.h>
3642
/*
 * kdb_send_sig_info - Allows kdb to send signals without exposing
 * signal internals.  This function checks whether the required locks
 * are available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
3648void
3649kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
3650{
3651 static struct task_struct *kdb_prev_t;
3652 int sig, new_t;
3653 if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "the kernel; try again later\n");
3657 return;
3658 }
3659 spin_unlock(&t->sighand->siglock);
3660 new_t = kdb_prev_t != t;
3661 kdb_prev_t = t;
3662 if (t->state != TASK_RUNNING && new_t) {
3663 kdb_printf("Process is not RUNNING, sending a signal from "
3664 "kdb risks deadlock\n"
3665 "on the run queue locks. "
3666 "The signal has _not_ been sent.\n"
3667 "Reissue the kill command if you want to risk "
3668 "the deadlock.\n");
3669 return;
3670 }
3671 sig = info->si_signo;
	if (send_sig_info(sig, info, t))
		kdb_printf("Failed to send signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d sent to process %d.\n", sig, t->pid);
3677}
3678#endif
3679