/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_SIGNAL_H
#define _LINUX_SCHED_SIGNAL_H

#include <linux/rculist.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/jobctl.h>
#include <linux/sched/task.h>
#include <linux/cred.h>
#include <linux/refcount.h>
#include <linux/posix-timers.h>
#include <linux/mm_types.h>
#include <asm/ptrace.h>
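/*
 * Types defining task->signal and task->sighand and APIs using them:
 */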
struct sighand_struct {
	spinlock_t siglock;
	refcount_t count;
	wait_queue_head_t signalfd_wqh;
	struct k_sigaction action[_NSIG];
};
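/*
 * Per-process accounting information, used for BSD process accounting
 * (see the CONFIG_BSD_PROCESS_ACCT member further down).
 */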
struct pacct_struct {
	int ac_flag;
	long ac_exitcode;
	unsigned long ac_mem;
	u64 ac_utime, ac_stime;
	unsigned long ac_minflt, ac_majflt;
};
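/* Expiry time and reload increment of one process CPU interval timer. */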
struct cpu_itimer {
	u64 expires;
	u64 incr;
};
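/*
 * This is the atomic variant of task_cputime, which can be used for
 * storing and updating task_cputime statistics without locking.
 */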
struct task_cputime_atomic {
	atomic64_t utime;
	atomic64_t stime;
	atomic64_t sum_exec_runtime;
};

#define INIT_CPUTIME_ATOMIC \
	(struct task_cputime_atomic) { \
		.utime = ATOMIC64_INIT(0), \
		.stime = ATOMIC64_INIT(0), \
		.sum_exec_runtime = ATOMIC64_INIT(0), \
	}
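/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime_atomic:	atomic thread group interval timers.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */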
struct thread_group_cputimer {
	struct task_cputime_atomic cputime_atomic;
};

struct multiprocess_signals {
	sigset_t signal;
	struct hlist_node node;
};

struct core_thread {
	struct task_struct *task;
	struct core_thread *next;
};

struct core_state {
	atomic_t nr_threads;
	struct core_thread dumper;
	struct completion startup;
};
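/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */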
struct signal_struct {
	refcount_t sigcnt;
	atomic_t live;
	int nr_threads;
	struct list_head thread_head;

	wait_queue_head_t wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	struct task_struct *curr_target;

	/* shared signal handling: */
	struct sigpending shared_pending;

	/* For collecting multiprocess signals during fork */
	struct hlist_head multiprocess;

	/* thread group exit support */
	int group_exit_code;
	/* notify group_exec_task when notify_count is less or equal to 0 */
	int notify_count;
	struct task_struct *group_exec_task;

	/* thread group stop support, overloads group_exit_code too */
	int group_stop_count;
	unsigned int flags;	/* see SIGNAL_* flags below */

	struct core_state *core_state;	/* coredumping support */
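	/*
	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
	 * manager, to re-parent orphan (double-forking) child processes
	 * to this process instead of 'init'. The service manager is
	 * able to receive SIGCHLD signals and is able to investigate
	 * the process until it calls wait(). All children of this
	 * process will inherit a flag if they should look for a
	 * child_subreaper process at exit.
	 */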
	unsigned int is_child_subreaper:1;
	unsigned int has_child_subreaper:1;

#ifdef CONFIG_POSIX_TIMERS

	/* POSIX.1b Interval Timers */
	int posix_timer_id;
	struct list_head posix_timers;

	/* ITIMER_REAL timer for the process */
	struct hrtimer real_timer;
	ktime_t it_real_incr;
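	/*
	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing the array as these
	 * values are defined to 0 and 1 respectively
	 */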
	struct cpu_itimer it[2];
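	/*
	 * Thread group totals for process CPU timers.
	 * See thread_group_cputimer(), et al, for details.
	 */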
	struct thread_group_cputimer cputimer;

#endif
	/* Empty if CONFIG_POSIX_TIMERS=n */
	struct posix_cputimers posix_cputimers;

	/* PID/PID hash table linkage. */
	struct pid *pids[PIDTYPE_MAX];

#ifdef CONFIG_NO_HZ_FULL
	atomic_t tick_dep_mask;
#endif

	struct pid *tty_old_pgrp;

	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty;	/* NULL if no tty */

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif
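	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */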
	seqlock_t stats_lock;
	u64 utime, stime, cutime, cstime;
	u64 gtime;
	u64 cgtime;
	struct prev_cputime prev_cputime;
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long inblock, oublock, cinblock, coublock;
	unsigned long maxrss, cmaxrss;
	struct task_io_accounting ioac;
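	/*
	 * Cumulative ns of schedule CPU time of dead threads in the
	 * group, not including a zombie group leader. (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */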
	unsigned long long sum_sched_runtime;
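	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */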
	struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct pacct;	/* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
	struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
	unsigned audit_tty;
	struct tty_audit_buf *tty_audit_buf;
#endif

	/*
	 * Thread is the potential origin of an oom condition; kill first on
	 * oom.
	 */
	bool oom_flag_origin;
	short oom_score_adj;		/* OOM kill score adjustment */
	short oom_score_adj_min;	/* OOM kill score adjustment min value.
					 * Only settable by CAP_SYS_RESOURCE. */
	struct mm_struct *oom_mm;	/* recorded mm when the thread group got
					 * killed by the oom killer */

	struct mutex cred_guard_mutex;	/* guard against foreign influences on
					 * credential calculations
					 * (notably ptrace).
					 * Deprecated do not use in new code.
					 * Use exec_update_lock instead. */

	struct rw_semaphore exec_update_lock;	/* Held while task_struct is
						 * being updated during exec,
						 * and may have inconsistent
						 * permissions. */
} __randomize_layout;
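/*
 * Bits in flags field of signal_struct.
 */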
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
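/*
 * Pending notifications to parent.
 */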
#define SIGNAL_CLD_STOPPED	0x00000010
#define SIGNAL_CLD_CONTINUED	0x00000020
#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */

#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
			  SIGNAL_STOP_CONTINUED)

static inline void signal_set_stop_flags(struct signal_struct *sig,
					 unsigned int flags)
{
	WARN_ON(sig->flags & SIGNAL_GROUP_EXIT);
	sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
}

extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *task, sigset_t *mask,
			  kernel_siginfo_t *info, enum pid_type *type);

static inline int kernel_dequeue_signal(void)
{
	struct task_struct *task = current;
	kernel_siginfo_t __info;
	enum pid_type __type;
	int ret;

	spin_lock_irq(&task->sighand->siglock);
	ret = dequeue_signal(task, &task->blocked, &__info, &__type);
	spin_unlock_irq(&task->sighand->siglock);

	return ret;
}

static inline void kernel_signal_stop(void)
{
	spin_lock_irq(&current->sighand->siglock);
	if (current->jobctl & JOBCTL_STOP_DEQUEUED)
		set_special_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);

	schedule();
}
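
/*
 * ia64 threads extra siginfo fields (imm, flags, isr) through the
 * fault-signal helpers below; on all other architectures the macro
 * expands to nothing.
 */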
#ifdef __ia64__
# define ___ARCH_SI_IA64(_a1, _a2, _a3) , _a1, _a2, _a3
#else
# define ___ARCH_SI_IA64(_a1, _a2, _a3)
#endif

int force_sig_fault_to_task(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t);
int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr));
int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t);

int force_sig_mceerr(int code, void __user *, short);
int send_sig_mceerr(int code, void __user *, short, struct task_struct *);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper);
int force_sig_pkuerr(void __user *addr, u32 pkey);
int force_sig_perf(void __user *addr, u32 type, u64 sig_data);

int force_sig_ptrace_errno_trap(int errno, void __user *addr);
int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno);
int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
			  struct task_struct *t);
int force_sig_seccomp(int syscall, int reason, bool force_coredump);

extern int send_sig_info(int, struct kernel_siginfo *, struct task_struct *);
extern void force_sigsegv(int sig);
extern int force_sig_info(struct kernel_siginfo *);
extern int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid);
extern int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr, struct pid *,
				const struct cred *);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int);
extern void force_fatal_sig(int);
extern void force_exit_sig(int);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(struct sigqueue *, struct pid *, enum pid_type);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);

static inline int restart_syscall(void)
{
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	return -ERESTARTNOINTR;
}

static inline int task_sigpending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p, TIF_SIGPENDING));
}

static inline int signal_pending(struct task_struct *p)
{
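	/*
	 * TIF_NOTIFY_SIGNAL isn't really a signal, but it requires the same
	 * behavior in terms of ensuring that we break out of wait loops
	 * so that notify signal callbacks can be processed.
	 */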
	if (unlikely(test_tsk_thread_flag(p, TIF_NOTIFY_SIGNAL)))
		return 1;
	return task_sigpending(p);
}
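
/*
 * Typical use (sketch): long-running kernel loops poll signal_pending()
 * and bail out so the task can return to user space and take the signal:
 *
 *	while (!done) {
 *		if (signal_pending(current))
 *			return -ERESTARTSYS;
 *		cond_resched();
 *	}
 */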

static inline int __fatal_signal_pending(struct task_struct *p)
{
	return unlikely(sigismember(&p->pending.signal, SIGKILL));
}

static inline int fatal_signal_pending(struct task_struct *p)
{
	return task_sigpending(p) && __fatal_signal_pending(p);
}

static inline int signal_pending_state(unsigned int state, struct task_struct *p)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;
	if (!signal_pending(p))
		return 0;

	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
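/*
 * This should only be used in fault handlers to decide whether we
 * should stop the current fault routine to handle the signals
 * instead, especially with the case where we've got interrupted with
 * a VM_FAULT_RETRY.
 */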
static inline bool fault_signal_pending(vm_fault_t fault_flags,
					struct pt_regs *regs)
{
	return unlikely((fault_flags & VM_FAULT_RETRY) &&
			(fatal_signal_pending(current) ||
			 (user_mode(regs) && signal_pending(current))));
}
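/*
 * Reevaluate whether the task has signals pending delivery.
 * This is required every time the blocked sigset_t changes.
 * Callers must hold sighand->siglock.
 */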
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);
extern void calculate_sigpending(void);

extern void signal_wake_up_state(struct task_struct *t, unsigned int state);

static inline void signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
}
static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
}

void task_join_group_stop(struct task_struct *task);

#ifdef TIF_RESTORE_SIGMASK
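/*
 * Some architectures define a per-thread TIF_RESTORE_SIGMASK flag to
 * mark that current->saved_sigmask must be restored to current->blocked
 * on return to user mode (see restore_saved_sigmask() below). These
 * helpers set, clear and test that flag; architectures without the
 * flag fall back to the task_struct::restore_sigmask boolean in the
 * #else branch further down.
 */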
static inline void set_restore_sigmask(void)
{
	set_thread_flag(TIF_RESTORE_SIGMASK);
}

static inline void clear_tsk_restore_sigmask(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_RESTORE_SIGMASK);
}

static inline void clear_restore_sigmask(void)
{
	clear_thread_flag(TIF_RESTORE_SIGMASK);
}
static inline bool test_tsk_restore_sigmask(struct task_struct *task)
{
	return test_tsk_thread_flag(task, TIF_RESTORE_SIGMASK);
}
static inline bool test_restore_sigmask(void)
{
	return test_thread_flag(TIF_RESTORE_SIGMASK);
}
static inline bool test_and_clear_restore_sigmask(void)
{
	return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
}

#else
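
/* Architectures without TIF_RESTORE_SIGMASK use a boolean in task_struct. */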
static inline void set_restore_sigmask(void)
{
	current->restore_sigmask = true;
}
static inline void clear_tsk_restore_sigmask(struct task_struct *task)
{
	task->restore_sigmask = false;
}
static inline void clear_restore_sigmask(void)
{
	current->restore_sigmask = false;
}
static inline bool test_restore_sigmask(void)
{
	return current->restore_sigmask;
}
static inline bool test_tsk_restore_sigmask(struct task_struct *task)
{
	return task->restore_sigmask;
}
static inline bool test_and_clear_restore_sigmask(void)
{
	if (!current->restore_sigmask)
		return false;
	current->restore_sigmask = false;
	return true;
}
#endif

static inline void restore_saved_sigmask(void)
{
	if (test_and_clear_restore_sigmask())
		__set_current_blocked(&current->saved_sigmask);
}

extern int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize);

static inline void restore_saved_sigmask_unless(bool interrupted)
{
	if (interrupted)
		WARN_ON(!signal_pending(current));
	else
		restore_saved_sigmask();
}

static inline sigset_t *sigmask_to_save(void)
{
	sigset_t *res = &current->blocked;
	if (unlikely(test_restore_sigmask()))
		res = &current->saved_sigmask;
	return res;
}

static inline int kill_cad_pid(int sig, int priv)
{
	return kill_pid(cad_pid, sig, priv);
}
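
/* These can be the second arg to send_sig_info/send_group_sig_info.  */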
#define SEND_SIG_NOINFO ((struct kernel_siginfo *) 0)
#define SEND_SIG_PRIV	((struct kernel_siginfo *) 1)

static inline int __on_sig_stack(unsigned long sp)
{
#ifdef CONFIG_STACK_GROWSUP
	return sp >= current->sas_ss_sp &&
		sp - current->sas_ss_sp < current->sas_ss_size;
#else
	return sp > current->sas_ss_sp &&
		sp - current->sas_ss_sp <= current->sas_ss_size;
#endif
}
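/*
 * True if we are on the alternate signal stack.
 */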
static inline int on_sig_stack(unsigned long sp)
{
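	/*
	 * If the signal stack is SS_AUTODISARM then, by construction, we
	 * can't be on the signal stack unless user code deliberately set
	 * SS_AUTODISARM when we were already on it.
	 *
	 * This improves reliability: if user state gets corrupted such that
	 * the stack pointer points very close to the end of the signal stack,
	 * then this check will enable the signal to be handled anyway.
	 */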
	if (current->sas_ss_flags & SS_AUTODISARM)
		return 0;

	return __on_sig_stack(sp);
}

static inline int sas_ss_flags(unsigned long sp)
{
	if (!current->sas_ss_size)
		return SS_DISABLE;

	return on_sig_stack(sp) ? SS_ONSTACK : 0;
}

static inline void sas_ss_reset(struct task_struct *p)
{
	p->sas_ss_sp = 0;
	p->sas_ss_size = 0;
	p->sas_ss_flags = SS_DISABLE;
}

static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
{
	if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp))
#ifdef CONFIG_STACK_GROWSUP
		return current->sas_ss_sp;
#else
		return current->sas_ss_sp + current->sas_ss_size;
#endif
	return sp;
}

extern void __cleanup_sighand(struct sighand_struct *);
extern void flush_itimer_signals(void);

#define tasklist_empty() \
	list_empty(&init_task.tasks)

#define next_task(p) \
	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)

#define for_each_process(p) \
	for (p = &init_task ; (p = next_task(p)) != &init_task ; )

extern bool current_is_single_threaded(void);
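/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 *          'break' will not work as expected - use goto instead.
 */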
#define do_each_thread(g, t) \
	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

#define while_each_thread(g, t) \
	while ((t = next_thread(t)) != g)

#define __for_each_thread(signal, t) \
	list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)

#define for_each_thread(p, t) \
	__for_each_thread((p)->signal, t)

/* Careful: this is a double loop, 'break' won't work as expected. */
#define for_each_process_thread(p, t) \
	for_each_process(p) for_each_thread(p, t)
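
/*
 * Example (sketch): these iterators walk RCU-protected lists, so the
 * caller must hold rcu_read_lock() (or tasklist_lock):
 *
 *	rcu_read_lock();
 *	for_each_process_thread(p, t)
 *		inspect(t);	// hypothetical per-thread hook
 *	rcu_read_unlock();
 */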

typedef int (*proc_visitor)(struct task_struct *p, void *data);
void walk_process_tree(struct task_struct *top, proc_visitor, void *);

static inline
struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	if (type == PIDTYPE_PID)
		pid = task_pid(task);
	else
		pid = task->signal->pids[type];
	return pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
	return task->signal->pids[PIDTYPE_TGID];
}
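/*
 * Without tasklist or RCU lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current,
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */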
static inline struct pid *task_pgrp(struct task_struct *task)
{
	return task->signal->pids[PIDTYPE_PGID];
}

static inline struct pid *task_session(struct task_struct *task)
{
	return task->signal->pids[PIDTYPE_SID];
}

static inline int get_nr_threads(struct task_struct *task)
{
	return task->signal->nr_threads;
}

static inline bool thread_group_leader(struct task_struct *p)
{
	return p->exit_signal >= 0;
}

static inline
bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
	return p1->signal == p2->signal;
}

static inline struct task_struct *next_thread(const struct task_struct *p)
{
	return list_entry_rcu(p->thread_group.next,
			      struct task_struct, thread_group);
}

static inline int thread_group_empty(struct task_struct *p)
{
	return list_empty(&p->thread_group);
}

#define delay_group_leader(p) \
	(thread_group_leader(p) && !thread_group_empty(p))

extern bool thread_group_exited(struct pid *pid);

extern struct sighand_struct *__lock_task_sighand(struct task_struct *task,
						  unsigned long *flags);

static inline struct sighand_struct *lock_task_sighand(struct task_struct *task,
						       unsigned long *flags)
{
	struct sighand_struct *ret;

	ret = __lock_task_sighand(task, flags);
	(void)__cond_lock(&task->sighand->siglock, ret);
	return ret;
}

static inline void unlock_task_sighand(struct task_struct *task,
				       unsigned long *flags)
{
	spin_unlock_irqrestore(&task->sighand->siglock, *flags);
}
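
/*
 * Typical pairing (sketch): lock_task_sighand() returns NULL if the task
 * has already released its signal handling state, so check the result:
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(task, &flags)) {
 *		... task->signal and task->sighand are stable here ...
 *		unlock_task_sighand(task, &flags);
 *	}
 */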

#ifdef CONFIG_LOCKDEP
extern void lockdep_assert_task_sighand_held(struct task_struct *task);
#else
static inline void lockdep_assert_task_sighand_held(struct task_struct *task) { }
#endif

static inline unsigned long task_rlimit(const struct task_struct *task,
					unsigned int limit)
{
	return READ_ONCE(task->signal->rlim[limit].rlim_cur);
}

static inline unsigned long task_rlimit_max(const struct task_struct *task,
					    unsigned int limit)
{
	return READ_ONCE(task->signal->rlim[limit].rlim_max);
}

static inline unsigned long rlimit(unsigned int limit)
{
	return task_rlimit(current, limit);
}

static inline unsigned long rlimit_max(unsigned int limit)
{
	return task_rlimit_max(current, limit);
}
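
/*
 * Example (sketch): a typical check against the calling task's soft
 * limit, here for the address-space limit:
 *
 *	if (new_size > rlimit(RLIMIT_AS))
 *		return -ENOMEM;
 */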

#endif /* _LINUX_SCHED_SIGNAL_H */