linux/include/linux/sched/signal.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_SIGNAL_H
#define _LINUX_SCHED_SIGNAL_H

#include <linux/rculist.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/jobctl.h>
#include <linux/sched/task.h>
#include <linux/cred.h>
#include <linux/refcount.h>
#include <linux/posix-timers.h>
#include <linux/mm_types.h>
#include <asm/ptrace.h>

/*
 * Types defining task->signal and task->sighand and APIs using them:
 */

struct sighand_struct {
	spinlock_t		siglock;
	refcount_t		count;
	wait_queue_head_t	signalfd_wqh;
	struct k_sigaction	action[_NSIG];
};

/*
 * Per-process accounting stats:
 */
struct pacct_struct {
	int			ac_flag;
	long			ac_exitcode;
	unsigned long		ac_mem;
	u64			ac_utime, ac_stime;
	unsigned long		ac_minflt, ac_majflt;
};

struct cpu_itimer {
	u64 expires;
	u64 incr;
};

/*
 * This is the atomic variant of task_cputime, which can be used for
 * storing and updating task_cputime statistics without locking.
 */
struct task_cputime_atomic {
	atomic64_t utime;
	atomic64_t stime;
	atomic64_t sum_exec_runtime;
};

#define INIT_CPUTIME_ATOMIC \
	(struct task_cputime_atomic) {				\
		.utime = ATOMIC64_INIT(0),			\
		.stime = ATOMIC64_INIT(0),			\
		.sum_exec_runtime = ATOMIC64_INIT(0),		\
	}

/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime_atomic:	atomic thread group interval timers.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
	struct task_cputime_atomic cputime_atomic;
};

struct multiprocess_signals {
	sigset_t signal;
	struct hlist_node node;
};

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	refcount_t		sigcnt;
	atomic_t		live;
	int			nr_threads;
	struct list_head	thread_head;

	wait_queue_head_t	wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	struct task_struct	*curr_target;

	/* shared signal handling: */
	struct sigpending	shared_pending;

	/* For collecting multiprocess signals during fork */
	struct hlist_head	multiprocess;

	/* thread group exit support */
	int			group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals, group_exit_task processes the signal.
	 */
	int			notify_count;
	struct task_struct	*group_exit_task;

	/* thread group stop support, overloads group_exit_code too */
	int			group_stop_count;
	unsigned int		flags; /* see SIGNAL_* flags below */

	/*
	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
	 * manager, to re-parent orphan (double-forking) child processes
	 * to this process instead of 'init'. The service manager can
	 * receive SIGCHLD signals and investigate the process until it
	 * calls wait(). All children of this process will inherit a
	 * flag indicating that they should look for a child_subreaper
	 * process at exit.
	 */
	unsigned int		is_child_subreaper:1;
	unsigned int		has_child_subreaper:1;

#ifdef CONFIG_POSIX_TIMERS

	/* POSIX.1b Interval Timers */
	int			posix_timer_id;
	struct list_head	posix_timers;

	/* ITIMER_REAL timer for the process */
	struct hrtimer real_timer;
	ktime_t it_real_incr;

	/*
	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process; we use
	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing the array, as
	 * these values are defined to 0 and 1 respectively.
	 */
	struct cpu_itimer it[2];

	/*
	 * Thread group totals for process CPU timers.
	 * See thread_group_cputimer(), et al, for details.
	 */
	struct thread_group_cputimer cputimer;

#endif
	/* Empty if CONFIG_POSIX_TIMERS=n */
	struct posix_cputimers posix_cputimers;

	/* PID/PID hash table linkage. */
	struct pid *pids[PIDTYPE_MAX];

#ifdef CONFIG_NO_HZ_FULL
	atomic_t tick_dep_mask;
#endif

	struct pid *tty_old_pgrp;

	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty; /* NULL if no tty */

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif
	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	seqlock_t stats_lock;
	u64 utime, stime, cutime, cstime;
	u64 gtime;
	u64 cgtime;
	struct prev_cputime prev_cputime;
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long inblock, oublock, cinblock, coublock;
	unsigned long maxrss, cmaxrss;
	struct task_io_accounting ioac;

	/*
	 * Cumulative ns of scheduled CPU time of dead threads in the
	 * group, not including a zombie group leader. (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sum_sched_runtime;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct pacct;	/* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
	struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
	unsigned audit_tty;
	struct tty_audit_buf *tty_audit_buf;
#endif

	/*
	 * Thread is the potential origin of an oom condition; kill first on
	 * oom
	 */
	bool oom_flag_origin;
	short oom_score_adj;		/* OOM kill score adjustment */
	short oom_score_adj_min;	/* OOM kill score adjustment min value.
					 * Only settable by CAP_SYS_RESOURCE. */
	struct mm_struct *oom_mm;	/* recorded mm when the thread group got
					 * killed by the oom killer */

	struct mutex cred_guard_mutex;	/* guard against foreign influences on
					 * credential calculations
					 * (notably ptrace).
					 * Deprecated: do not use in new code.
					 * Use exec_update_lock instead.
					 */
	struct rw_semaphore exec_update_lock;	/* Held while task_struct is
						 * being updated during exec,
						 * and may have inconsistent
						 * permissions.
						 */
} __randomize_layout;
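
/*
 * Illustrative sketch (not part of the original header): per the
 * locking NOTE above signal_struct, its fields are protected by the
 * shared sighand_struct lock, so a hypothetical writer would do:
 *
 *	spin_lock_irq(&tsk->sighand->siglock);
 *	tsk->signal->group_stop_count = 0;	(example field update)
 *	spin_unlock_irq(&tsk->sighand->siglock);
 */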

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
#define SIGNAL_GROUP_COREDUMP	0x00000008 /* coredump in progress */
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED	0x00000010
#define SIGNAL_CLD_CONTINUED	0x00000020
#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */

#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
			  SIGNAL_STOP_CONTINUED)

static inline void signal_set_stop_flags(struct signal_struct *sig,
					 unsigned int flags)
{
	WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
	sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
}

/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
	return	(sig->flags & SIGNAL_GROUP_EXIT) ||
		(sig->group_exit_task != NULL);
}

extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *task,
			  sigset_t *mask, kernel_siginfo_t *info);

static inline int kernel_dequeue_signal(void)
{
	struct task_struct *task = current;
	kernel_siginfo_t __info;
	int ret;

	spin_lock_irq(&task->sighand->siglock);
	ret = dequeue_signal(task, &task->blocked, &__info);
	spin_unlock_irq(&task->sighand->siglock);

	return ret;
}
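
/*
 * Usage sketch (hypothetical kthread, assuming it enabled delivery of
 * the signal with allow_signal() during setup):
 *
 *	allow_signal(SIGKILL);
 *	while (!kthread_should_stop()) {
 *		if (signal_pending(current) &&
 *		    kernel_dequeue_signal() == SIGKILL)
 *			break;
 *		... do one unit of work ...
 *	}
 */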

static inline void kernel_signal_stop(void)
{
	spin_lock_irq(&current->sighand->siglock);
	if (current->jobctl & JOBCTL_STOP_DEQUEUED)
		set_special_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);

	schedule();
}

#ifdef __ia64__
# define ___ARCH_SI_IA64(_a1, _a2, _a3) , _a1, _a2, _a3
#else
# define ___ARCH_SI_IA64(_a1, _a2, _a3)
#endif

int force_sig_fault_to_task(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t);
int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr));
int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t);

int force_sig_mceerr(int code, void __user *, short);
int send_sig_mceerr(int code, void __user *, short, struct task_struct *);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper);
int force_sig_pkuerr(void __user *addr, u32 pkey);
int force_sig_perf(void __user *addr, u32 type, u64 sig_data);

int force_sig_ptrace_errno_trap(int errno, void __user *addr);
int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno);
int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
			struct task_struct *t);
int force_sig_seccomp(int syscall, int reason, bool force_coredump);

extern int send_sig_info(int, struct kernel_siginfo *, struct task_struct *);
extern void force_sigsegv(int sig);
extern int force_sig_info(struct kernel_siginfo *);
extern int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid);
extern int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr, struct pid *,
				const struct cred *);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(struct sigqueue *, struct pid *, enum pid_type);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);

static inline int restart_syscall(void)
{
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	return -ERESTARTNOINTR;
}
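
/*
 * Usage sketch (hypothetical syscall path; some_lock is a placeholder):
 * when an operation is interrupted before any user-visible side effect,
 * the caller can ask for a transparent restart instead of surfacing
 * -EINTR:
 *
 *	if (mutex_lock_interruptible(&some_lock))
 *		return restart_syscall();
 */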

static inline int task_sigpending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p, TIF_SIGPENDING));
}

static inline int signal_pending(struct task_struct *p)
{
	/*
	 * TIF_NOTIFY_SIGNAL isn't really a signal, but it requires the same
	 * behavior in terms of ensuring that we break out of wait loops
	 * so that notify signal callbacks can be processed.
	 */
	if (unlikely(test_tsk_thread_flag(p, TIF_NOTIFY_SIGNAL)))
		return 1;
	return task_sigpending(p);
}
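
/*
 * Usage sketch (a minimal interruptible wait loop): sleeps must
 * re-check signal_pending() so that a queued signal, or a pending
 * TIF_NOTIFY_SIGNAL callback, breaks the wait:
 *
 *	while (!condition) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (signal_pending(current)) {
 *			__set_current_state(TASK_RUNNING);
 *			return -ERESTARTSYS;
 *		}
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 */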

static inline int __fatal_signal_pending(struct task_struct *p)
{
	return unlikely(sigismember(&p->pending.signal, SIGKILL));
}

static inline int fatal_signal_pending(struct task_struct *p)
{
	return task_sigpending(p) && __fatal_signal_pending(p);
}

static inline int signal_pending_state(unsigned int state, struct task_struct *p)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;
	if (!signal_pending(p))
		return 0;

	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
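
/*
 * Worked example (assuming a signal is pending): the sleep state
 * determines which signals may abort the sleep:
 *
 *	signal_pending_state(TASK_INTERRUPTIBLE, p)    any pending signal
 *	signal_pending_state(TASK_KILLABLE, p)         pending SIGKILL only
 *	signal_pending_state(TASK_UNINTERRUPTIBLE, p)  never true
 *
 * TASK_KILLABLE is TASK_WAKEKILL | TASK_UNINTERRUPTIBLE, so it passes
 * the first test above but still requires __fatal_signal_pending().
 */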

/*
 * This should only be used in fault handlers, to decide whether we
 * should stop the current fault routine and handle the signals
 * instead, especially when the fault has been interrupted and
 * VM_FAULT_RETRY was returned.
 */
static inline bool fault_signal_pending(vm_fault_t fault_flags,
					struct pt_regs *regs)
{
	return unlikely((fault_flags & VM_FAULT_RETRY) &&
			(fatal_signal_pending(current) ||
			 (user_mode(regs) && signal_pending(current))));
}
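
/*
 * Usage sketch (hypothetical arch page-fault handler): bail out and
 * let signal handling run before retrying the fault:
 *
 *	fault = handle_mm_fault(vma, address, flags, regs);
 *	if (fault_signal_pending(fault, regs))
 *		return;
 *	if (fault & VM_FAULT_RETRY) {
 *		flags |= FAULT_FLAG_TRIED;
 *		goto retry;
 *	}
 */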

/*
 * Reevaluate whether the task has signals pending delivery.
 * Wake the task if so.
 * This is required every time the blocked sigset_t changes.
 * Callers must hold sighand->siglock.
 */
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);
extern void calculate_sigpending(void);

extern void signal_wake_up_state(struct task_struct *t, unsigned int state);

static inline void signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
}

static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
}

void task_join_group_stop(struct task_struct *task);

#ifdef TIF_RESTORE_SIGMASK
/*
 * Legacy restore_sigmask accessors.  These are inefficient on
 * SMP architectures because they require atomic operations.
 */

/**
 * set_restore_sigmask() - make sure saved_sigmask processing gets done
 *
 * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code
 * will run before returning to user mode, to process the flag.  For
 * all callers, TIF_SIGPENDING is already set or it's no harm to set
 * it.  TIF_RESTORE_SIGMASK need not be in the set of bits that the
 * arch code will notice on return to user mode, in case those bits
 * are scarce.  We set TIF_SIGPENDING here to ensure that the arch
 * signal code always gets run when TIF_RESTORE_SIGMASK is set.
 */
static inline void set_restore_sigmask(void)
{
	set_thread_flag(TIF_RESTORE_SIGMASK);
}

static inline void clear_tsk_restore_sigmask(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_RESTORE_SIGMASK);
}

static inline void clear_restore_sigmask(void)
{
	clear_thread_flag(TIF_RESTORE_SIGMASK);
}

static inline bool test_tsk_restore_sigmask(struct task_struct *task)
{
	return test_tsk_thread_flag(task, TIF_RESTORE_SIGMASK);
}

static inline bool test_restore_sigmask(void)
{
	return test_thread_flag(TIF_RESTORE_SIGMASK);
}

static inline bool test_and_clear_restore_sigmask(void)
{
	return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
}

#else	/* TIF_RESTORE_SIGMASK */

/* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. */
static inline void set_restore_sigmask(void)
{
	current->restore_sigmask = true;
}

static inline void clear_tsk_restore_sigmask(struct task_struct *task)
{
	task->restore_sigmask = false;
}

static inline void clear_restore_sigmask(void)
{
	current->restore_sigmask = false;
}

static inline bool test_restore_sigmask(void)
{
	return current->restore_sigmask;
}

static inline bool test_tsk_restore_sigmask(struct task_struct *task)
{
	return task->restore_sigmask;
}

static inline bool test_and_clear_restore_sigmask(void)
{
	if (!current->restore_sigmask)
		return false;
	current->restore_sigmask = false;
	return true;
}
#endif

static inline void restore_saved_sigmask(void)
{
	if (test_and_clear_restore_sigmask())
		__set_current_blocked(&current->saved_sigmask);
}

extern int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize);

static inline void restore_saved_sigmask_unless(bool interrupted)
{
	if (interrupted)
		WARN_ON(!signal_pending(current));
	else
		restore_saved_sigmask();
}

static inline sigset_t *sigmask_to_save(void)
{
	sigset_t *res = &current->blocked;

	if (unlikely(test_restore_sigmask()))
		res = &current->saved_sigmask;
	return res;
}
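
/*
 * Usage sketch (modelled on ppoll()/pselect()-style syscalls, with
 * do_the_wait() a hypothetical helper): install a temporary mask on
 * entry, then restore the old one on exit unless a signal must be
 * delivered with the temporary mask still in place:
 *
 *	ret = set_user_sigmask(umask, sigsetsize);
 *	if (ret)
 *		return ret;
 *	ret = do_the_wait(...);
 *	interrupted = signal_pending(current);
 *	restore_saved_sigmask_unless(interrupted);
 *	if (interrupted && !ret)
 *		ret = -ERESTARTNOHAND;
 */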

static inline int kill_cad_pid(int sig, int priv)
{
	return kill_pid(cad_pid, sig, priv);
}

/* These can be the second arg to send_sig_info/send_group_sig_info.  */
#define SEND_SIG_NOINFO	((struct kernel_siginfo *) 0)
#define SEND_SIG_PRIV	((struct kernel_siginfo *) 1)

static inline int __on_sig_stack(unsigned long sp)
{
#ifdef CONFIG_STACK_GROWSUP
	return sp >= current->sas_ss_sp &&
		sp - current->sas_ss_sp < current->sas_ss_size;
#else
	return sp > current->sas_ss_sp &&
		sp - current->sas_ss_sp <= current->sas_ss_size;
#endif
}

/*
 * True if we are on the alternate signal stack.
 */
static inline int on_sig_stack(unsigned long sp)
{
	/*
	 * If the signal stack is SS_AUTODISARM then, by construction, we
	 * can't be on the signal stack unless user code deliberately set
	 * SS_AUTODISARM when we were already on it.
	 *
	 * This improves reliability: if user state gets corrupted such that
	 * the stack pointer points very close to the end of the signal stack,
	 * then this check will enable the signal to be handled anyway.
	 */
	if (current->sas_ss_flags & SS_AUTODISARM)
		return 0;

	return __on_sig_stack(sp);
}

static inline int sas_ss_flags(unsigned long sp)
{
	if (!current->sas_ss_size)
		return SS_DISABLE;

	return on_sig_stack(sp) ? SS_ONSTACK : 0;
}

static inline void sas_ss_reset(struct task_struct *p)
{
	p->sas_ss_sp = 0;
	p->sas_ss_size = 0;
	p->sas_ss_flags = SS_DISABLE;
}

static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
{
	if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp))
#ifdef CONFIG_STACK_GROWSUP
		return current->sas_ss_sp;
#else
		return current->sas_ss_sp + current->sas_ss_size;
#endif
	return sp;
}
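
/*
 * Usage sketch (hypothetical arch signal-frame setup): sigsp() picks
 * the base for the handler frame, switching to the alternate stack
 * when SA_ONSTACK applies and it is not already in use:
 *
 *	unsigned long sp = sigsp(user_stack_pointer(regs), ksig);
 *	frame = (struct rt_sigframe __user *)((sp - sizeof(*frame)) & ~15UL);
 */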

extern void __cleanup_sighand(struct sighand_struct *);
extern void flush_itimer_signals(void);

#define tasklist_empty() \
	list_empty(&init_task.tasks)

#define next_task(p) \
	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)

#define for_each_process(p) \
	for (p = &init_task ; (p = next_task(p)) != &init_task ; )

extern bool current_is_single_threaded(void);

/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 *	    'break' will not work as expected - use goto instead.
 */
#define do_each_thread(g, t) \
	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

#define while_each_thread(g, t) \
	while ((t = next_thread(t)) != g)

#define __for_each_thread(signal, t)	\
	list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)

#define for_each_thread(p, t)		\
	__for_each_thread((p)->signal, t)

/* Careful: this is a double loop, 'break' won't work as expected. */
#define for_each_process_thread(p, t)	\
	for_each_process(p) for_each_thread(p, t)
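
/*
 * Usage sketch: as the comments above warn, these iterators expand to
 * nested loops, so leave them with goto rather than break, and hold
 * RCU (or tasklist_lock) around the walk:
 *
 *	rcu_read_lock();
 *	for_each_process_thread(p, t) {
 *		if (t->pid == target_pid)	(hypothetical predicate)
 *			goto out;
 *	}
 * out:
 *	rcu_read_unlock();
 */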

typedef int (*proc_visitor)(struct task_struct *p, void *data);
void walk_process_tree(struct task_struct *top, proc_visitor, void *);

static inline
struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;

	if (type == PIDTYPE_PID)
		pid = task_pid(task);
	else
		pid = task->signal->pids[type];
	return pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
	return task->signal->pids[PIDTYPE_TGID];
}

/*
 * Without tasklist or RCU lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current;
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
static inline struct pid *task_pgrp(struct task_struct *task)
{
	return task->signal->pids[PIDTYPE_PGID];
}

static inline struct pid *task_session(struct task_struct *task)
{
	return task->signal->pids[PIDTYPE_SID];
}
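
/*
 * Usage sketch (per the comment above, hold RCU around the dereference
 * and take a reference if the pid must outlive the critical section):
 *
 *	rcu_read_lock();
 *	pgrp = get_pid(task_pgrp(task));
 *	rcu_read_unlock();
 *	...
 *	put_pid(pgrp);
 */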

static inline int get_nr_threads(struct task_struct *task)
{
	return task->signal->nr_threads;
}

static inline bool thread_group_leader(struct task_struct *p)
{
	return p->exit_signal >= 0;
}

static inline
bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
	return p1->signal == p2->signal;
}

static inline struct task_struct *next_thread(const struct task_struct *p)
{
	return list_entry_rcu(p->thread_group.next,
			      struct task_struct, thread_group);
}

static inline int thread_group_empty(struct task_struct *p)
{
	return list_empty(&p->thread_group);
}

#define delay_group_leader(p) \
		(thread_group_leader(p) && !thread_group_empty(p))

extern bool thread_group_exited(struct pid *pid);

extern struct sighand_struct *__lock_task_sighand(struct task_struct *task,
							unsigned long *flags);

static inline struct sighand_struct *lock_task_sighand(struct task_struct *task,
						       unsigned long *flags)
{
	struct sighand_struct *ret;

	ret = __lock_task_sighand(task, flags);
	(void)__cond_lock(&task->sighand->siglock, ret);
	return ret;
}

static inline void unlock_task_sighand(struct task_struct *task,
						unsigned long *flags)
{
	spin_unlock_irqrestore(&task->sighand->siglock, *flags);
}
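
/*
 * Usage sketch: __lock_task_sighand() returns NULL when the task is
 * exiting and its sighand is already gone, so callers must check the
 * result before touching task->signal or task->sighand:
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(task, &flags)) {
 *		... fields are stable here ...
 *		unlock_task_sighand(task, &flags);
 *	}
 */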

#ifdef CONFIG_LOCKDEP
extern void lockdep_assert_task_sighand_held(struct task_struct *task);
#else
static inline void lockdep_assert_task_sighand_held(struct task_struct *task) { }
#endif

static inline unsigned long task_rlimit(const struct task_struct *task,
		unsigned int limit)
{
	return READ_ONCE(task->signal->rlim[limit].rlim_cur);
}

static inline unsigned long task_rlimit_max(const struct task_struct *task,
		unsigned int limit)
{
	return READ_ONCE(task->signal->rlim[limit].rlim_max);
}

static inline unsigned long rlimit(unsigned int limit)
{
	return task_rlimit(current, limit);
}

static inline unsigned long rlimit_max(unsigned int limit)
{
	return task_rlimit_max(current, limit);
}
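
/*
 * Usage sketch: READ_ONCE() in the accessors above makes a lockless
 * read of a single rlimit word safe, so limit checks are one-liners:
 *
 *	if (new_len > rlimit(RLIMIT_STACK))
 *		return -ENOMEM;
 */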

#endif /* _LINUX_SCHED_SIGNAL_H */