linux/include/linux/sched.h
<<
>>
Prefs
   1#ifndef _LINUX_SCHED_H
   2#define _LINUX_SCHED_H
   3
   4#include <uapi/linux/sched.h>
   5
   6#include <linux/sched/prio.h>
   7
   8
   9struct sched_param {
  10        int sched_priority;
  11};
  12
  13#include <asm/param.h>  /* for HZ */
  14
  15#include <linux/capability.h>
  16#include <linux/threads.h>
  17#include <linux/kernel.h>
  18#include <linux/types.h>
  19#include <linux/timex.h>
  20#include <linux/jiffies.h>
  21#include <linux/plist.h>
  22#include <linux/rbtree.h>
  23#include <linux/thread_info.h>
  24#include <linux/cpumask.h>
  25#include <linux/errno.h>
  26#include <linux/nodemask.h>
  27#include <linux/mm_types.h>
  28#include <linux/preempt_mask.h>
  29
  30#include <asm/page.h>
  31#include <asm/ptrace.h>
  32#include <linux/cputime.h>
  33
  34#include <linux/smp.h>
  35#include <linux/sem.h>
  36#include <linux/shm.h>
  37#include <linux/signal.h>
  38#include <linux/compiler.h>
  39#include <linux/completion.h>
  40#include <linux/pid.h>
  41#include <linux/percpu.h>
  42#include <linux/topology.h>
  43#include <linux/proportions.h>
  44#include <linux/seccomp.h>
  45#include <linux/rcupdate.h>
  46#include <linux/rculist.h>
  47#include <linux/rtmutex.h>
  48
  49#include <linux/time.h>
  50#include <linux/param.h>
  51#include <linux/resource.h>
  52#include <linux/timer.h>
  53#include <linux/hrtimer.h>
  54#include <linux/task_io_accounting.h>
  55#include <linux/latencytop.h>
  56#include <linux/cred.h>
  57#include <linux/llist.h>
  58#include <linux/uidgid.h>
  59#include <linux/gfp.h>
  60#include <linux/magic.h>
  61
  62#include <asm/processor.h>
  63
  64#define SCHED_ATTR_SIZE_VER0    48      /* sizeof first published struct */
  65
  66/*
  67 * Extended scheduling parameters data structure.
  68 *
  69 * This is needed because the original struct sched_param can not be
  70 * altered without introducing ABI issues with legacy applications
  71 * (e.g., in sched_getparam()).
  72 *
  73 * However, the possibility of specifying more than just a priority for
  74 * the tasks may be useful for a wide variety of application fields, e.g.,
  75 * multimedia, streaming, automation and control, and many others.
  76 *
  77 * This variant (sched_attr) is meant at describing a so-called
  78 * sporadic time-constrained task. In such model a task is specified by:
  79 *  - the activation period or minimum instance inter-arrival time;
  80 *  - the maximum (or average, depending on the actual scheduling
  81 *    discipline) computation time of all instances, a.k.a. runtime;
  82 *  - the deadline (relative to the actual activation time) of each
  83 *    instance.
  84 * Very briefly, a periodic (sporadic) task asks for the execution of
  85 * some specific computation --which is typically called an instance--
  86 * (at most) every period. Moreover, each instance typically lasts no more
  87 * than the runtime and must be completed by time instant t equal to
  88 * the instance activation time + the deadline.
  89 *
  90 * This is reflected by the actual fields of the sched_attr structure:
  91 *
  92 *  @size               size of the structure, for fwd/bwd compat.
  93 *
  94 *  @sched_policy       task's scheduling policy
  95 *  @sched_flags        for customizing the scheduler behaviour
  96 *  @sched_nice         task's nice value      (SCHED_NORMAL/BATCH)
  97 *  @sched_priority     task's static priority (SCHED_FIFO/RR)
  98 *  @sched_deadline     representative of the task's deadline
  99 *  @sched_runtime      representative of the task's runtime
 100 *  @sched_period       representative of the task's period
 101 *
 102 * Given this task model, there are a multiplicity of scheduling algorithms
 103 * and policies, that can be used to ensure all the tasks will make their
 104 * timing constraints.
 105 *
 106 * As of now, the SCHED_DEADLINE policy (sched_dl scheduling class) is the
 107 * only user of this new interface. More information about the algorithm
 108 * available in the scheduling class file or in Documentation/.
 109 */
 110struct sched_attr {
 111        u32 size;
 112
 113        u32 sched_policy;
 114        u64 sched_flags;
 115
 116        /* SCHED_NORMAL, SCHED_BATCH */
 117        s32 sched_nice;
 118
 119        /* SCHED_FIFO, SCHED_RR */
 120        u32 sched_priority;
 121
 122        /* SCHED_DEADLINE */
 123        u64 sched_runtime;
 124        u64 sched_deadline;
 125        u64 sched_period;
 126};
 127
 128struct exec_domain;
 129struct futex_pi_state;
 130struct robust_list_head;
 131struct bio_list;
 132struct fs_struct;
 133struct perf_event_context;
 134struct blk_plug;
 135struct filename;
 136
 137#define VMACACHE_BITS 2
 138#define VMACACHE_SIZE (1U << VMACACHE_BITS)
 139#define VMACACHE_MASK (VMACACHE_SIZE - 1)
 140
 141/*
 142 * These are the constant used to fake the fixed-point load-average
 143 * counting. Some notes:
 144 *  - 11 bit fractions expand to 22 bits by the multiplies: this gives
 145 *    a load-average precision of 10 bits integer + 11 bits fractional
 146 *  - if you want to count load-averages more often, you need more
 147 *    precision, or rounding will get you. With 2-second counting freq,
 148 *    the EXP_n values would be 1981, 2034 and 2043 if still using only
 149 *    11 bit fractions.
 150 */
 151extern unsigned long avenrun[];         /* Load averages */
 152extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
 153
 154#define FSHIFT          11              /* nr of bits of precision */
 155#define FIXED_1         (1<<FSHIFT)     /* 1.0 as fixed-point */
 156#define LOAD_FREQ       (5*HZ+1)        /* 5 sec intervals */
 157#define EXP_1           1884            /* 1/exp(5sec/1min) as fixed-point */
 158#define EXP_5           2014            /* 1/exp(5sec/5min) */
 159#define EXP_15          2037            /* 1/exp(5sec/15min) */
 160
 161#define CALC_LOAD(load,exp,n) \
 162        load *= exp; \
 163        load += n*(FIXED_1-exp); \
 164        load >>= FSHIFT;
 165
 166extern unsigned long total_forks;
 167extern int nr_threads;
 168DECLARE_PER_CPU(unsigned long, process_counts);
 169extern int nr_processes(void);
 170extern unsigned long nr_running(void);
 171extern bool single_task_running(void);
 172extern unsigned long nr_iowait(void);
 173extern unsigned long nr_iowait_cpu(int cpu);
 174extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
 175
 176extern void calc_global_load(unsigned long ticks);
 177extern void update_cpu_load_nohz(void);
 178
 179extern unsigned long get_parent_ip(unsigned long addr);
 180
 181extern void dump_cpu_task(int cpu);
 182
 183struct seq_file;
 184struct cfs_rq;
 185struct task_group;
 186#ifdef CONFIG_SCHED_DEBUG
 187extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
 188extern void proc_sched_set_task(struct task_struct *p);
 189extern void
 190print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
 191#endif
 192
 193/*
 194 * Task state bitmask. NOTE! These bits are also
 195 * encoded in fs/proc/array.c: get_task_state().
 196 *
 197 * We have two separate sets of flags: task->state
 198 * is about runnability, while task->exit_state are
 199 * about the task exiting. Confusing, but this way
 200 * modifying one set can't modify the other one by
 201 * mistake.
 202 */
 203#define TASK_RUNNING            0
 204#define TASK_INTERRUPTIBLE      1
 205#define TASK_UNINTERRUPTIBLE    2
 206#define __TASK_STOPPED          4
 207#define __TASK_TRACED           8
 208/* in tsk->exit_state */
 209#define EXIT_DEAD               16
 210#define EXIT_ZOMBIE             32
 211#define EXIT_TRACE              (EXIT_ZOMBIE | EXIT_DEAD)
 212/* in tsk->state again */
 213#define TASK_DEAD               64
 214#define TASK_WAKEKILL           128
 215#define TASK_WAKING             256
 216#define TASK_PARKED             512
 217#define TASK_STATE_MAX          1024
 218
 219#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWP"
 220
 221extern char ___assert_task_state[1 - 2*!!(
 222                sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
 223
 224/* Convenience macros for the sake of set_task_state */
 225#define TASK_KILLABLE           (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
 226#define TASK_STOPPED            (TASK_WAKEKILL | __TASK_STOPPED)
 227#define TASK_TRACED             (TASK_WAKEKILL | __TASK_TRACED)
 228
 229/* Convenience macros for the sake of wake_up */
 230#define TASK_NORMAL             (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
 231#define TASK_ALL                (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
 232
 233/* get_task_state() */
 234#define TASK_REPORT             (TASK_RUNNING | TASK_INTERRUPTIBLE | \
 235                                 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
 236                                 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
 237
 238#define task_is_traced(task)    ((task->state & __TASK_TRACED) != 0)
 239#define task_is_stopped(task)   ((task->state & __TASK_STOPPED) != 0)
 240#define task_is_stopped_or_traced(task) \
 241                        ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
 242#define task_contributes_to_load(task)  \
 243                                ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
 244                                 (task->flags & PF_FROZEN) == 0)
 245
 246#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 247
 248#define __set_task_state(tsk, state_value)                      \
 249        do {                                                    \
 250                (tsk)->task_state_change = _THIS_IP_;           \
 251                (tsk)->state = (state_value);                   \
 252        } while (0)
 253#define set_task_state(tsk, state_value)                        \
 254        do {                                                    \
 255                (tsk)->task_state_change = _THIS_IP_;           \
 256                set_mb((tsk)->state, (state_value));            \
 257        } while (0)
 258
 259/*
 260 * set_current_state() includes a barrier so that the write of current->state
 261 * is correctly serialised wrt the caller's subsequent test of whether to
 262 * actually sleep:
 263 *
 264 *      set_current_state(TASK_UNINTERRUPTIBLE);
 265 *      if (do_i_need_to_sleep())
 266 *              schedule();
 267 *
 268 * If the caller does not need such serialisation then use __set_current_state()
 269 */
 270#define __set_current_state(state_value)                        \
 271        do {                                                    \
 272                current->task_state_change = _THIS_IP_;         \
 273                current->state = (state_value);                 \
 274        } while (0)
 275#define set_current_state(state_value)                          \
 276        do {                                                    \
 277                current->task_state_change = _THIS_IP_;         \
 278                set_mb(current->state, (state_value));          \
 279        } while (0)
 280
 281#else
 282
 283#define __set_task_state(tsk, state_value)              \
 284        do { (tsk)->state = (state_value); } while (0)
 285#define set_task_state(tsk, state_value)                \
 286        set_mb((tsk)->state, (state_value))
 287
 288/*
 289 * set_current_state() includes a barrier so that the write of current->state
 290 * is correctly serialised wrt the caller's subsequent test of whether to
 291 * actually sleep:
 292 *
 293 *      set_current_state(TASK_UNINTERRUPTIBLE);
 294 *      if (do_i_need_to_sleep())
 295 *              schedule();
 296 *
 297 * If the caller does not need such serialisation then use __set_current_state()
 298 */
 299#define __set_current_state(state_value)                \
 300        do { current->state = (state_value); } while (0)
 301#define set_current_state(state_value)                  \
 302        set_mb(current->state, (state_value))
 303
 304#endif
 305
 306/* Task command name length */
 307#define TASK_COMM_LEN 16
 308
 309#include <linux/spinlock.h>
 310
 311/*
 312 * This serializes "schedule()" and also protects
 313 * the run-queue from deletions/modifications (but
 314 * _adding_ to the beginning of the run-queue has
 315 * a separate lock).
 316 */
 317extern rwlock_t tasklist_lock;
 318extern spinlock_t mmlist_lock;
 319
 320struct task_struct;
 321
 322#ifdef CONFIG_PROVE_RCU
 323extern int lockdep_tasklist_lock_is_held(void);
 324#endif /* #ifdef CONFIG_PROVE_RCU */
 325
 326extern void sched_init(void);
 327extern void sched_init_smp(void);
 328extern asmlinkage void schedule_tail(struct task_struct *prev);
 329extern void init_idle(struct task_struct *idle, int cpu);
 330extern void init_idle_bootup_task(struct task_struct *idle);
 331
 332extern int runqueue_is_locked(int cpu);
 333
 334#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
 335extern void nohz_balance_enter_idle(int cpu);
 336extern void set_cpu_sd_state_idle(void);
 337extern int get_nohz_timer_target(int pinned);
 338#else
 339static inline void nohz_balance_enter_idle(int cpu) { }
 340static inline void set_cpu_sd_state_idle(void) { }
 341static inline int get_nohz_timer_target(int pinned)
 342{
 343        return smp_processor_id();
 344}
 345#endif
 346
 347/*
 348 * Only dump TASK_* tasks. (0 for all tasks)
 349 */
 350extern void show_state_filter(unsigned long state_filter);
 351
 352static inline void show_state(void)
 353{
 354        show_state_filter(0);
 355}
 356
 357extern void show_regs(struct pt_regs *);
 358
 359/*
 360 * TASK is a pointer to the task whose backtrace we want to see (or NULL for current
 361 * task), SP is the stack pointer of the first frame that should be shown in the back
 362 * trace (or NULL if the entire call-chain of the task should be shown).
 363 */
 364extern void show_stack(struct task_struct *task, unsigned long *sp);
 365
 366void io_schedule(void);
 367long io_schedule_timeout(long timeout);
 368
 369extern void cpu_init (void);
 370extern void trap_init(void);
 371extern void update_process_times(int user);
 372extern void scheduler_tick(void);
 373
 374extern void sched_show_task(struct task_struct *p);
 375
 376#ifdef CONFIG_LOCKUP_DETECTOR
 377extern void touch_softlockup_watchdog(void);
 378extern void touch_softlockup_watchdog_sync(void);
 379extern void touch_all_softlockup_watchdogs(void);
 380extern int proc_dowatchdog_thresh(struct ctl_table *table, int write,
 381                                  void __user *buffer,
 382                                  size_t *lenp, loff_t *ppos);
 383extern unsigned int  softlockup_panic;
 384void lockup_detector_init(void);
 385#else
 386static inline void touch_softlockup_watchdog(void)
 387{
 388}
 389static inline void touch_softlockup_watchdog_sync(void)
 390{
 391}
 392static inline void touch_all_softlockup_watchdogs(void)
 393{
 394}
 395static inline void lockup_detector_init(void)
 396{
 397}
 398#endif
 399
 400#ifdef CONFIG_DETECT_HUNG_TASK
 401void reset_hung_task_detector(void);
 402#else
 403static inline void reset_hung_task_detector(void)
 404{
 405}
 406#endif
 407
 408/* Attach to any functions which should be ignored in wchan output. */
 409#define __sched         __attribute__((__section__(".sched.text")))
 410
 411/* Linker adds these: start and end of __sched functions */
 412extern char __sched_text_start[], __sched_text_end[];
 413
 414/* Is this address in the __sched functions? */
 415extern int in_sched_functions(unsigned long addr);
 416
 417#define MAX_SCHEDULE_TIMEOUT    LONG_MAX
 418extern signed long schedule_timeout(signed long timeout);
 419extern signed long schedule_timeout_interruptible(signed long timeout);
 420extern signed long schedule_timeout_killable(signed long timeout);
 421extern signed long schedule_timeout_uninterruptible(signed long timeout);
 422asmlinkage void schedule(void);
 423extern void schedule_preempt_disabled(void);
 424
 425struct nsproxy;
 426struct user_namespace;
 427
 428#ifdef CONFIG_MMU
 429extern void arch_pick_mmap_layout(struct mm_struct *mm);
 430extern unsigned long
 431arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
 432                       unsigned long, unsigned long);
 433extern unsigned long
 434arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
 435                          unsigned long len, unsigned long pgoff,
 436                          unsigned long flags);
 437#else
 438static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
 439#endif
 440
 441#define SUID_DUMP_DISABLE       0       /* No setuid dumping */
 442#define SUID_DUMP_USER          1       /* Dump as user of process */
 443#define SUID_DUMP_ROOT          2       /* Dump as root */
 444
 445/* mm flags */
 446
 447/* for SUID_DUMP_* above */
 448#define MMF_DUMPABLE_BITS 2
 449#define MMF_DUMPABLE_MASK ((1 << MMF_DUMPABLE_BITS) - 1)
 450
 451extern void set_dumpable(struct mm_struct *mm, int value);
 452/*
 453 * This returns the actual value of the suid_dumpable flag. For things
 454 * that are using this for checking for privilege transitions, it must
 455 * test against SUID_DUMP_USER rather than treating it as a boolean
 456 * value.
 457 */
 458static inline int __get_dumpable(unsigned long mm_flags)
 459{
 460        return mm_flags & MMF_DUMPABLE_MASK;
 461}
 462
 463static inline int get_dumpable(struct mm_struct *mm)
 464{
 465        return __get_dumpable(mm->flags);
 466}
 467
 468/* coredump filter bits */
 469#define MMF_DUMP_ANON_PRIVATE   2
 470#define MMF_DUMP_ANON_SHARED    3
 471#define MMF_DUMP_MAPPED_PRIVATE 4
 472#define MMF_DUMP_MAPPED_SHARED  5
 473#define MMF_DUMP_ELF_HEADERS    6
 474#define MMF_DUMP_HUGETLB_PRIVATE 7
 475#define MMF_DUMP_HUGETLB_SHARED  8
 476
 477#define MMF_DUMP_FILTER_SHIFT   MMF_DUMPABLE_BITS
 478#define MMF_DUMP_FILTER_BITS    7
 479#define MMF_DUMP_FILTER_MASK \
 480        (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
 481#define MMF_DUMP_FILTER_DEFAULT \
 482        ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\
 483         (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
 484
 485#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
 486# define MMF_DUMP_MASK_DEFAULT_ELF      (1 << MMF_DUMP_ELF_HEADERS)
 487#else
 488# define MMF_DUMP_MASK_DEFAULT_ELF      0
 489#endif
 490                                        /* leave room for more dump flags */
 491#define MMF_VM_MERGEABLE        16      /* KSM may merge identical pages */
 492#define MMF_VM_HUGEPAGE         17      /* set when VM_HUGEPAGE is set on vma */
 493#define MMF_EXE_FILE_CHANGED    18      /* see prctl_set_mm_exe_file() */
 494
 495#define MMF_HAS_UPROBES         19      /* has uprobes */
 496#define MMF_RECALC_UPROBES      20      /* MMF_HAS_UPROBES can be wrong */
 497
 498#define MMF_INIT_MASK           (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
 499
 500struct sighand_struct {
 501        atomic_t                count;
 502        struct k_sigaction      action[_NSIG];
 503        spinlock_t              siglock;
 504        wait_queue_head_t       signalfd_wqh;
 505};
 506
 507struct pacct_struct {
 508        int                     ac_flag;
 509        long                    ac_exitcode;
 510        unsigned long           ac_mem;
 511        cputime_t               ac_utime, ac_stime;
 512        unsigned long           ac_minflt, ac_majflt;
 513};
 514
 515struct cpu_itimer {
 516        cputime_t expires;
 517        cputime_t incr;
 518        u32 error;
 519        u32 incr_error;
 520};
 521
 522/**
 523 * struct cputime - snaphsot of system and user cputime
 524 * @utime: time spent in user mode
 525 * @stime: time spent in system mode
 526 *
 527 * Gathers a generic snapshot of user and system time.
 528 */
 529struct cputime {
 530        cputime_t utime;
 531        cputime_t stime;
 532};
 533
 534/**
 535 * struct task_cputime - collected CPU time counts
 536 * @utime:              time spent in user mode, in &cputime_t units
 537 * @stime:              time spent in kernel mode, in &cputime_t units
 538 * @sum_exec_runtime:   total time spent on the CPU, in nanoseconds
 539 *
 540 * This is an extension of struct cputime that includes the total runtime
 541 * spent by the task from the scheduler point of view.
 542 *
 543 * As a result, this structure groups together three kinds of CPU time
 544 * that are tracked for threads and thread groups.  Most things considering
 545 * CPU time want to group these counts together and treat all three
 546 * of them in parallel.
 547 */
 548struct task_cputime {
 549        cputime_t utime;
 550        cputime_t stime;
 551        unsigned long long sum_exec_runtime;
 552};
 553/* Alternate field names when used to cache expirations. */
 554#define prof_exp        stime
 555#define virt_exp        utime
 556#define sched_exp       sum_exec_runtime
 557
 558#define INIT_CPUTIME    \
 559        (struct task_cputime) {                                 \
 560                .utime = 0,                                     \
 561                .stime = 0,                                     \
 562                .sum_exec_runtime = 0,                          \
 563        }
 564
 565#ifdef CONFIG_PREEMPT_COUNT
 566#define PREEMPT_DISABLED        (1 + PREEMPT_ENABLED)
 567#else
 568#define PREEMPT_DISABLED        PREEMPT_ENABLED
 569#endif
 570
 571/*
 572 * Disable preemption until the scheduler is running.
 573 * Reset by start_kernel()->sched_init()->init_idle().
 574 *
 575 * We include PREEMPT_ACTIVE to avoid cond_resched() from working
 576 * before the scheduler is active -- see should_resched().
 577 */
 578#define INIT_PREEMPT_COUNT      (PREEMPT_DISABLED + PREEMPT_ACTIVE)
 579
 580/**
 581 * struct thread_group_cputimer - thread group interval timer counts
 582 * @cputime:            thread group interval timers.
 583 * @running:            non-zero when there are timers running and
 584 *                      @cputime receives updates.
 585 * @lock:               lock for fields in this struct.
 586 *
 587 * This structure contains the version of task_cputime, above, that is
 588 * used for thread group CPU timer calculations.
 589 */
 590struct thread_group_cputimer {
 591        struct task_cputime cputime;
 592        int running;
 593        raw_spinlock_t lock;
 594};
 595
 596#include <linux/rwsem.h>
 597struct autogroup;
 598
 599/*
 600 * NOTE! "signal_struct" does not have its own
 601 * locking, because a shared signal_struct always
 602 * implies a shared sighand_struct, so locking
 603 * sighand_struct is always a proper superset of
 604 * the locking of signal_struct.
 605 */
 606struct signal_struct {
 607        atomic_t                sigcnt;
 608        atomic_t                live;
 609        int                     nr_threads;
 610        struct list_head        thread_head;
 611
 612        wait_queue_head_t       wait_chldexit;  /* for wait4() */
 613
 614        /* current thread group signal load-balancing target: */
 615        struct task_struct      *curr_target;
 616
 617        /* shared signal handling: */
 618        struct sigpending       shared_pending;
 619
 620        /* thread group exit support */
 621        int                     group_exit_code;
 622        /* overloaded:
 623         * - notify group_exit_task when ->count is equal to notify_count
 624         * - everyone except group_exit_task is stopped during signal delivery
 625         *   of fatal signals, group_exit_task processes the signal.
 626         */
 627        int                     notify_count;
 628        struct task_struct      *group_exit_task;
 629
 630        /* thread group stop support, overloads group_exit_code too */
 631        int                     group_stop_count;
 632        unsigned int            flags; /* see SIGNAL_* flags below */
 633
 634        /*
 635         * PR_SET_CHILD_SUBREAPER marks a process, like a service
 636         * manager, to re-parent orphan (double-forking) child processes
 637         * to this process instead of 'init'. The service manager is
 638         * able to receive SIGCHLD signals and is able to investigate
 639         * the process until it calls wait(). All children of this
 640         * process will inherit a flag if they should look for a
 641         * child_subreaper process at exit.
 642         */
 643        unsigned int            is_child_subreaper:1;
 644        unsigned int            has_child_subreaper:1;
 645
 646        /* POSIX.1b Interval Timers */
 647        int                     posix_timer_id;
 648        struct list_head        posix_timers;
 649
 650        /* ITIMER_REAL timer for the process */
 651        struct hrtimer real_timer;
 652        struct pid *leader_pid;
 653        ktime_t it_real_incr;
 654
 655        /*
 656         * ITIMER_PROF and ITIMER_VIRTUAL timers for the process, we use
 657         * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing array as these
 658         * values are defined to 0 and 1 respectively
 659         */
 660        struct cpu_itimer it[2];
 661
 662        /*
 663         * Thread group totals for process CPU timers.
 664         * See thread_group_cputimer(), et al, for details.
 665         */
 666        struct thread_group_cputimer cputimer;
 667
 668        /* Earliest-expiration cache. */
 669        struct task_cputime cputime_expires;
 670
 671        struct list_head cpu_timers[3];
 672
 673        struct pid *tty_old_pgrp;
 674
 675        /* boolean value for session group leader */
 676        int leader;
 677
 678        struct tty_struct *tty; /* NULL if no tty */
 679
 680#ifdef CONFIG_SCHED_AUTOGROUP
 681        struct autogroup *autogroup;
 682#endif
 683        /*
 684         * Cumulative resource counters for dead threads in the group,
 685         * and for reaped dead child processes forked by this group.
 686         * Live threads maintain their own counters and add to these
 687         * in __exit_signal, except for the group leader.
 688         */
 689        seqlock_t stats_lock;
 690        cputime_t utime, stime, cutime, cstime;
 691        cputime_t gtime;
 692        cputime_t cgtime;
 693#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 694        struct cputime prev_cputime;
 695#endif
 696        unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
 697        unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
 698        unsigned long inblock, oublock, cinblock, coublock;
 699        unsigned long maxrss, cmaxrss;
 700        struct task_io_accounting ioac;
 701
 702        /*
 703         * Cumulative ns of schedule CPU time fo dead threads in the
 704         * group, not including a zombie group leader, (This only differs
 705         * from jiffies_to_ns(utime + stime) if sched_clock uses something
 706         * other than jiffies.)
 707         */
 708        unsigned long long sum_sched_runtime;
 709
 710        /*
 711         * We don't bother to synchronize most readers of this at all,
 712         * because there is no reader checking a limit that actually needs
 713         * to get both rlim_cur and rlim_max atomically, and either one
 714         * alone is a single word that can safely be read normally.
 715         * getrlimit/setrlimit use task_lock(current->group_leader) to
 716         * protect this instead of the siglock, because they really
 717         * have no need to disable irqs.
 718         */
 719        struct rlimit rlim[RLIM_NLIMITS];
 720
 721#ifdef CONFIG_BSD_PROCESS_ACCT
 722        struct pacct_struct pacct;      /* per-process accounting information */
 723#endif
 724#ifdef CONFIG_TASKSTATS
 725        struct taskstats *stats;
 726#endif
 727#ifdef CONFIG_AUDIT
 728        unsigned audit_tty;
 729        unsigned audit_tty_log_passwd;
 730        struct tty_audit_buf *tty_audit_buf;
 731#endif
 732#ifdef CONFIG_CGROUPS
 733        /*
 734         * group_rwsem prevents new tasks from entering the threadgroup and
 735         * member tasks from exiting,a more specifically, setting of
 736         * PF_EXITING.  fork and exit paths are protected with this rwsem
 737         * using threadgroup_change_begin/end().  Users which require
 738         * threadgroup to remain stable should use threadgroup_[un]lock()
 739         * which also takes care of exec path.  Currently, cgroup is the
 740         * only user.
 741         */
 742        struct rw_semaphore group_rwsem;
 743#endif
 744
 745        oom_flags_t oom_flags;
 746        short oom_score_adj;            /* OOM kill score adjustment */
 747        short oom_score_adj_min;        /* OOM kill score adjustment min value.
 748                                         * Only settable by CAP_SYS_RESOURCE. */
 749
 750        struct mutex cred_guard_mutex;  /* guard against foreign influences on
 751                                         * credential calculations
 752                                         * (notably. ptrace) */
 753};
 754
 755/*
 756 * Bits in flags field of signal_struct.
 757 */
 758#define SIGNAL_STOP_STOPPED     0x00000001 /* job control stop in effect */
 759#define SIGNAL_STOP_CONTINUED   0x00000002 /* SIGCONT since WCONTINUED reap */
 760#define SIGNAL_GROUP_EXIT       0x00000004 /* group exit in progress */
 761#define SIGNAL_GROUP_COREDUMP   0x00000008 /* coredump in progress */
 762/*
 763 * Pending notifications to parent.
 764 */
 765#define SIGNAL_CLD_STOPPED      0x00000010
 766#define SIGNAL_CLD_CONTINUED    0x00000020
 767#define SIGNAL_CLD_MASK         (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
 768
 769#define SIGNAL_UNKILLABLE       0x00000040 /* for init: ignore fatal signals */
 770
 771/* If true, all threads except ->group_exit_task have pending SIGKILL */
 772static inline int signal_group_exit(const struct signal_struct *sig)
 773{
 774        return  (sig->flags & SIGNAL_GROUP_EXIT) ||
 775                (sig->group_exit_task != NULL);
 776}
 777
 778/*
 779 * Some day this will be a full-fledged user tracking system..
 780 */
 781struct user_struct {
 782        atomic_t __count;       /* reference count */
 783        atomic_t processes;     /* How many processes does this user have? */
 784        atomic_t sigpending;    /* How many pending signals does this user have? */
 785#ifdef CONFIG_INOTIFY_USER
 786        atomic_t inotify_watches; /* How many inotify watches does this user have? */
 787        atomic_t inotify_devs;  /* How many inotify devs does this user have opened? */
 788#endif
 789#ifdef CONFIG_FANOTIFY
 790        atomic_t fanotify_listeners;
 791#endif
 792#ifdef CONFIG_EPOLL
 793        atomic_long_t epoll_watches; /* The number of file descriptors currently watched */
 794#endif
 795#ifdef CONFIG_POSIX_MQUEUE
 796        /* protected by mq_lock */
 797        unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
 798#endif
 799        unsigned long locked_shm; /* How many pages of mlocked shm ? */
 800
 801#ifdef CONFIG_KEYS
 802        struct key *uid_keyring;        /* UID specific keyring */
 803        struct key *session_keyring;    /* UID's default session keyring */
 804#endif
 805
 806        /* Hash table maintenance information */
 807        struct hlist_node uidhash_node;
 808        kuid_t uid;
 809
 810#ifdef CONFIG_PERF_EVENTS
 811        atomic_long_t locked_vm;
 812#endif
 813};
 814
 815extern int uids_sysfs_init(void);
 816
 817extern struct user_struct *find_user(kuid_t);
 818
 819extern struct user_struct root_user;
 820#define INIT_USER (&root_user)
 821
 822
 823struct backing_dev_info;
 824struct reclaim_state;
 825
 826#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 827struct sched_info {
 828        /* cumulative counters */
 829        unsigned long pcount;         /* # of times run on this cpu */
 830        unsigned long long run_delay; /* time spent waiting on a runqueue */
 831
 832        /* timestamps */
 833        unsigned long long last_arrival,/* when we last ran on a cpu */
 834                           last_queued; /* when we were last queued to run */
 835};
 836#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
 837
 838#ifdef CONFIG_TASK_DELAY_ACCT
 839struct task_delay_info {
 840        spinlock_t      lock;
 841        unsigned int    flags;  /* Private per-task flags */
 842
 843        /* For each stat XXX, add following, aligned appropriately
 844         *
 845         * struct timespec XXX_start, XXX_end;
 846         * u64 XXX_delay;
 847         * u32 XXX_count;
 848         *
 849         * Atomicity of updates to XXX_delay, XXX_count protected by
 850         * single lock above (split into XXX_lock if contention is an issue).
 851         */
 852
 853        /*
 854         * XXX_count is incremented on every XXX operation, the delay
 855         * associated with the operation is added to XXX_delay.
 856         * XXX_delay contains the accumulated delay time in nanoseconds.
 857         */
 858        u64 blkio_start;        /* Shared by blkio, swapin */
 859        u64 blkio_delay;        /* wait for sync block io completion */
 860        u64 swapin_delay;       /* wait for swapin block io completion */
 861        u32 blkio_count;        /* total count of the number of sync block */
 862                                /* io operations performed */
 863        u32 swapin_count;       /* total count of the number of swapin block */
 864                                /* io operations performed */
 865
 866        u64 freepages_start;
 867        u64 freepages_delay;    /* wait for memory reclaim */
 868        u32 freepages_count;    /* total count of memory reclaim */
 869};
 870#endif  /* CONFIG_TASK_DELAY_ACCT */
 871
 872static inline int sched_info_on(void)
 873{
 874#ifdef CONFIG_SCHEDSTATS
 875        return 1;
 876#elif defined(CONFIG_TASK_DELAY_ACCT)
 877        extern int delayacct_on;
 878        return delayacct_on;
 879#else
 880        return 0;
 881#endif
 882}
 883
 884enum cpu_idle_type {
 885        CPU_IDLE,
 886        CPU_NOT_IDLE,
 887        CPU_NEWLY_IDLE,
 888        CPU_MAX_IDLE_TYPES
 889};
 890
 891/*
 892 * Increase resolution of cpu_capacity calculations
 893 */
 894#define SCHED_CAPACITY_SHIFT    10
 895#define SCHED_CAPACITY_SCALE    (1L << SCHED_CAPACITY_SHIFT)
 896
 897/*
 898 * sched-domains (multiprocessor balancing) declarations:
 899 */
 900#ifdef CONFIG_SMP
 901#define SD_LOAD_BALANCE         0x0001  /* Do load balancing on this domain. */
 902#define SD_BALANCE_NEWIDLE      0x0002  /* Balance when about to become idle */
 903#define SD_BALANCE_EXEC         0x0004  /* Balance on exec */
 904#define SD_BALANCE_FORK         0x0008  /* Balance on fork, clone */
 905#define SD_BALANCE_WAKE         0x0010  /* Balance on wakeup */
 906#define SD_WAKE_AFFINE          0x0020  /* Wake task to waking CPU */
 907#define SD_SHARE_CPUCAPACITY    0x0080  /* Domain members share cpu power */
 908#define SD_SHARE_POWERDOMAIN    0x0100  /* Domain members share power domain */
 909#define SD_SHARE_PKG_RESOURCES  0x0200  /* Domain members share cpu pkg resources */
 910#define SD_SERIALIZE            0x0400  /* Only a single load balancing instance */
 911#define SD_ASYM_PACKING         0x0800  /* Place busy groups earlier in the domain */
 912#define SD_PREFER_SIBLING       0x1000  /* Prefer to place tasks in a sibling domain */
 913#define SD_OVERLAP              0x2000  /* sched_domains of this level overlap */
 914#define SD_NUMA                 0x4000  /* cross-node balancing */
 915
 916#ifdef CONFIG_SCHED_SMT
 917static inline int cpu_smt_flags(void)
 918{
 919        return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
 920}
 921#endif
 922
 923#ifdef CONFIG_SCHED_MC
 924static inline int cpu_core_flags(void)
 925{
 926        return SD_SHARE_PKG_RESOURCES;
 927}
 928#endif
 929
 930#ifdef CONFIG_NUMA
 931static inline int cpu_numa_flags(void)
 932{
 933        return SD_NUMA;
 934}
 935#endif
 936
 937struct sched_domain_attr {
 938        int relax_domain_level;
 939};
 940
 941#define SD_ATTR_INIT    (struct sched_domain_attr) {    \
 942        .relax_domain_level = -1,                       \
 943}
 944
 945extern int sched_domain_level_max;
 946
 947struct sched_group;
 948
 949struct sched_domain {
 950        /* These fields must be setup */
 951        struct sched_domain *parent;    /* top domain must be null terminated */
 952        struct sched_domain *child;     /* bottom domain must be null terminated */
 953        struct sched_group *groups;     /* the balancing groups of the domain */
 954        unsigned long min_interval;     /* Minimum balance interval ms */
 955        unsigned long max_interval;     /* Maximum balance interval ms */
 956        unsigned int busy_factor;       /* less balancing by factor if busy */
 957        unsigned int imbalance_pct;     /* No balance until over watermark */
 958        unsigned int cache_nice_tries;  /* Leave cache hot tasks for # tries */
 959        unsigned int busy_idx;
 960        unsigned int idle_idx;
 961        unsigned int newidle_idx;
 962        unsigned int wake_idx;
 963        unsigned int forkexec_idx;
 964        unsigned int smt_gain;
 965
 966        int nohz_idle;                  /* NOHZ IDLE status */
 967        int flags;                      /* See SD_* */
 968        int level;
 969
 970        /* Runtime fields. */
 971        unsigned long last_balance;     /* init to jiffies. units in jiffies */
 972        unsigned int balance_interval;  /* initialise to 1. units in ms. */
 973        unsigned int nr_balance_failed; /* initialise to 0 */
 974
 975        /* idle_balance() stats */
 976        u64 max_newidle_lb_cost;
 977        unsigned long next_decay_max_lb_cost;
 978
 979#ifdef CONFIG_SCHEDSTATS
 980        /* load_balance() stats */
 981        unsigned int lb_count[CPU_MAX_IDLE_TYPES];
 982        unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
 983        unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
 984        unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
 985        unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
 986        unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
 987        unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
 988        unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
 989
 990        /* Active load balancing */
 991        unsigned int alb_count;
 992        unsigned int alb_failed;
 993        unsigned int alb_pushed;
 994
 995        /* SD_BALANCE_EXEC stats */
 996        unsigned int sbe_count;
 997        unsigned int sbe_balanced;
 998        unsigned int sbe_pushed;
 999
1000        /* SD_BALANCE_FORK stats */
1001        unsigned int sbf_count;
1002        unsigned int sbf_balanced;
1003        unsigned int sbf_pushed;
1004
1005        /* try_to_wake_up() stats */
1006        unsigned int ttwu_wake_remote;
1007        unsigned int ttwu_move_affine;
1008        unsigned int ttwu_move_balance;
1009#endif
1010#ifdef CONFIG_SCHED_DEBUG
1011        char *name;
1012#endif
1013        union {
1014                void *private;          /* used during construction */
1015                struct rcu_head rcu;    /* used during destruction */
1016        };
1017
1018        unsigned int span_weight;
1019        /*
1020         * Span of all CPUs in this domain.
1021         *
1022         * NOTE: this field is variable length. (Allocated dynamically
1023         * by attaching extra space to the end of the structure,
1024         * depending on how many CPUs the kernel has booted up with)
1025         */
1026        unsigned long span[0];
1027};
1028
1029static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
1030{
1031        return to_cpumask(sd->span);
1032}
1033
1034extern void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1035                                    struct sched_domain_attr *dattr_new);
1036
1037/* Allocate an array of sched domains, for partition_sched_domains(). */
1038cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
1039void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
1040
1041bool cpus_share_cache(int this_cpu, int that_cpu);
1042
1043typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
1044typedef int (*sched_domain_flags_f)(void);
1045
1046#define SDTL_OVERLAP    0x01
1047
1048struct sd_data {
1049        struct sched_domain **__percpu sd;
1050        struct sched_group **__percpu sg;
1051        struct sched_group_capacity **__percpu sgc;
1052};
1053
1054struct sched_domain_topology_level {
1055        sched_domain_mask_f mask;
1056        sched_domain_flags_f sd_flags;
1057        int                 flags;
1058        int                 numa_level;
1059        struct sd_data      data;
1060#ifdef CONFIG_SCHED_DEBUG
1061        char                *name;
1062#endif
1063};
1064
1065extern struct sched_domain_topology_level *sched_domain_topology;
1066
1067extern void set_sched_topology(struct sched_domain_topology_level *tl);
1068extern void wake_up_if_idle(int cpu);
1069
1070#ifdef CONFIG_SCHED_DEBUG
1071# define SD_INIT_NAME(type)             .name = #type
1072#else
1073# define SD_INIT_NAME(type)
1074#endif
1075
1076#else /* CONFIG_SMP */
1077
1078struct sched_domain_attr;
1079
1080static inline void
1081partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
1082                        struct sched_domain_attr *dattr_new)
1083{
1084}
1085
1086static inline bool cpus_share_cache(int this_cpu, int that_cpu)
1087{
1088        return true;
1089}
1090
1091#endif  /* !CONFIG_SMP */
1092
1093
1094struct io_context;                      /* See blkdev.h */
1095
1096
1097#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
1098extern void prefetch_stack(struct task_struct *t);
1099#else
1100static inline void prefetch_stack(struct task_struct *t) { }
1101#endif
1102
1103struct audit_context;           /* See audit.c */
1104struct mempolicy;
1105struct pipe_inode_info;
1106struct uts_namespace;
1107
1108struct load_weight {
1109        unsigned long weight;
1110        u32 inv_weight;
1111};
1112
1113struct sched_avg {
1114        /*
1115         * These sums represent an infinite geometric series and so are bound
1116         * above by 1024/(1-y).  Thus we only need a u32 to store them for all
1117         * choices of y < 1-2^(-32)*1024.
1118         */
1119        u32 runnable_avg_sum, runnable_avg_period;
1120        u64 last_runnable_update;
1121        s64 decay_count;
1122        unsigned long load_avg_contrib;
1123};
1124
1125#ifdef CONFIG_SCHEDSTATS
1126struct sched_statistics {
1127        u64                     wait_start;
1128        u64                     wait_max;
1129        u64                     wait_count;
1130        u64                     wait_sum;
1131        u64                     iowait_count;
1132        u64                     iowait_sum;
1133
1134        u64                     sleep_start;
1135        u64                     sleep_max;
1136        s64                     sum_sleep_runtime;
1137
1138        u64                     block_start;
1139        u64                     block_max;
1140        u64                     exec_max;
1141        u64                     slice_max;
1142
1143        u64                     nr_migrations_cold;
1144        u64                     nr_failed_migrations_affine;
1145        u64                     nr_failed_migrations_running;
1146        u64                     nr_failed_migrations_hot;
1147        u64                     nr_forced_migrations;
1148
1149        u64                     nr_wakeups;
1150        u64                     nr_wakeups_sync;
1151        u64                     nr_wakeups_migrate;
1152        u64                     nr_wakeups_local;
1153        u64                     nr_wakeups_remote;
1154        u64                     nr_wakeups_affine;
1155        u64                     nr_wakeups_affine_attempts;
1156        u64                     nr_wakeups_passive;
1157        u64                     nr_wakeups_idle;
1158};
1159#endif
1160
1161struct sched_entity {
1162        struct load_weight      load;           /* for load-balancing */
1163        struct rb_node          run_node;
1164        struct list_head        group_node;
1165        unsigned int            on_rq;
1166
1167        u64                     exec_start;
1168        u64                     sum_exec_runtime;
1169        u64                     vruntime;
1170        u64                     prev_sum_exec_runtime;
1171
1172        u64                     nr_migrations;
1173
1174#ifdef CONFIG_SCHEDSTATS
1175        struct sched_statistics statistics;
1176#endif
1177
1178#ifdef CONFIG_FAIR_GROUP_SCHED
1179        int                     depth;
1180        struct sched_entity     *parent;
1181        /* rq on which this entity is (to be) queued: */
1182        struct cfs_rq           *cfs_rq;
1183        /* rq "owned" by this entity/group: */
1184        struct cfs_rq           *my_q;
1185#endif
1186
1187#ifdef CONFIG_SMP
1188        /* Per-entity load-tracking */
1189        struct sched_avg        avg;
1190#endif
1191};
1192
1193struct sched_rt_entity {
1194        struct list_head run_list;
1195        unsigned long timeout;
1196        unsigned long watchdog_stamp;
1197        unsigned int time_slice;
1198
1199        struct sched_rt_entity *back;
1200#ifdef CONFIG_RT_GROUP_SCHED
1201        struct sched_rt_entity  *parent;
1202        /* rq on which this entity is (to be) queued: */
1203        struct rt_rq            *rt_rq;
1204        /* rq "owned" by this entity/group: */
1205        struct rt_rq            *my_q;
1206#endif
1207};
1208
1209struct sched_dl_entity {
1210        struct rb_node  rb_node;
1211
1212        /*
1213         * Original scheduling parameters. Copied here from sched_attr
1214         * during sched_setattr(), they will remain the same until
1215         * the next sched_setattr().
1216         */
1217        u64 dl_runtime;         /* maximum runtime for each instance    */
1218        u64 dl_deadline;        /* relative deadline of each instance   */
1219        u64 dl_period;          /* separation of two instances (period) */
1220        u64 dl_bw;              /* dl_runtime / dl_deadline             */
1221
1222        /*
1223         * Actual scheduling parameters. Initialized with the values above,
1224         * they are continously updated during task execution. Note that
1225         * the remaining runtime could be < 0 in case we are in overrun.
1226         */
1227        s64 runtime;            /* remaining runtime for this instance  */
1228        u64 deadline;           /* absolute deadline for this instance  */
1229        unsigned int flags;     /* specifying the scheduler behaviour   */
1230
1231        /*
1232         * Some bool flags:
1233         *
1234         * @dl_throttled tells if we exhausted the runtime. If so, the
1235         * task has to wait for a replenishment to be performed at the
1236         * next firing of dl_timer.
1237         *
1238         * @dl_new tells if a new instance arrived. If so we must
1239         * start executing it with full runtime and reset its absolute
1240         * deadline;
1241         *
1242         * @dl_boosted tells if we are boosted due to DI. If so we are
1243         * outside bandwidth enforcement mechanism (but only until we
1244         * exit the critical section);
1245         *
1246         * @dl_yielded tells if task gave up the cpu before consuming
1247         * all its available runtime during the last job.
1248         */
1249        int dl_throttled, dl_new, dl_boosted, dl_yielded;
1250
1251        /*
1252         * Bandwidth enforcement timer. Each -deadline task has its
1253         * own bandwidth to be enforced, thus we need one timer per task.
1254         */
1255        struct hrtimer dl_timer;
1256};
1257
1258union rcu_special {
1259        struct {
1260                bool blocked;
1261                bool need_qs;
1262        } b;
1263        short s;
1264};
1265struct rcu_node;
1266
1267enum perf_event_task_context {
1268        perf_invalid_context = -1,
1269        perf_hw_context = 0,
1270        perf_sw_context,
1271        perf_nr_task_contexts,
1272};
1273
1274struct task_struct {
1275        volatile long state;    /* -1 unrunnable, 0 runnable, >0 stopped */
1276        void *stack;
1277        atomic_t usage;
1278        unsigned int flags;     /* per process flags, defined below */
1279        unsigned int ptrace;
1280
1281#ifdef CONFIG_SMP
1282        struct llist_node wake_entry;
1283        int on_cpu;
1284        struct task_struct *last_wakee;
1285        unsigned long wakee_flips;
1286        unsigned long wakee_flip_decay_ts;
1287
1288        int wake_cpu;
1289#endif
1290        int on_rq;
1291
1292        int prio, static_prio, normal_prio;
1293        unsigned int rt_priority;
1294        const struct sched_class *sched_class;
1295        struct sched_entity se;
1296        struct sched_rt_entity rt;
1297#ifdef CONFIG_CGROUP_SCHED
1298        struct task_group *sched_task_group;
1299#endif
1300        struct sched_dl_entity dl;
1301
1302#ifdef CONFIG_PREEMPT_NOTIFIERS
1303        /* list of struct preempt_notifier: */
1304        struct hlist_head preempt_notifiers;
1305#endif
1306
1307#ifdef CONFIG_BLK_DEV_IO_TRACE
1308        unsigned int btrace_seq;
1309#endif
1310
1311        unsigned int policy;
1312        int nr_cpus_allowed;
1313        cpumask_t cpus_allowed;
1314
1315#ifdef CONFIG_PREEMPT_RCU
1316        int rcu_read_lock_nesting;
1317        union rcu_special rcu_read_unlock_special;
1318        struct list_head rcu_node_entry;
1319#endif /* #ifdef CONFIG_PREEMPT_RCU */
1320#ifdef CONFIG_PREEMPT_RCU
1321        struct rcu_node *rcu_blocked_node;
1322#endif /* #ifdef CONFIG_PREEMPT_RCU */
1323#ifdef CONFIG_TASKS_RCU
1324        unsigned long rcu_tasks_nvcsw;
1325        bool rcu_tasks_holdout;
1326        struct list_head rcu_tasks_holdout_list;
1327        int rcu_tasks_idle_cpu;
1328#endif /* #ifdef CONFIG_TASKS_RCU */
1329
1330#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
1331        struct sched_info sched_info;
1332#endif
1333
1334        struct list_head tasks;
1335#ifdef CONFIG_SMP
1336        struct plist_node pushable_tasks;
1337        struct rb_node pushable_dl_tasks;
1338#endif
1339
1340        struct mm_struct *mm, *active_mm;
1341#ifdef CONFIG_COMPAT_BRK
1342        unsigned brk_randomized:1;
1343#endif
1344        /* per-thread vma caching */
1345        u32 vmacache_seqnum;
1346        struct vm_area_struct *vmacache[VMACACHE_SIZE];
1347#if defined(SPLIT_RSS_COUNTING)
1348        struct task_rss_stat    rss_stat;
1349#endif
1350/* task state */
1351        int exit_state;
1352        int exit_code, exit_signal;
1353        int pdeath_signal;  /*  The signal sent when the parent dies  */
1354        unsigned int jobctl;    /* JOBCTL_*, siglock protected */
1355
1356        /* Used for emulating ABI behavior of previous Linux versions */
1357        unsigned int personality;
1358
1359        unsigned in_execve:1;   /* Tell the LSMs that the process is doing an
1360                                 * execve */
1361        unsigned in_iowait:1;
1362
1363        /* Revert to default priority/policy when forking */
1364        unsigned sched_reset_on_fork:1;
1365        unsigned sched_contributes_to_load:1;
1366
1367#ifdef CONFIG_MEMCG_KMEM
1368        unsigned memcg_kmem_skip_account:1;
1369#endif
1370
1371        unsigned long atomic_flags; /* Flags needing atomic access. */
1372
1373        pid_t pid;
1374        pid_t tgid;
1375
1376#ifdef CONFIG_CC_STACKPROTECTOR
1377        /* Canary value for the -fstack-protector gcc feature */
1378        unsigned long stack_canary;
1379#endif
1380        /*
1381         * pointers to (original) parent process, youngest child, younger sibling,
1382         * older sibling, respectively.  (p->father can be replaced with
1383         * p->real_parent->pid)
1384         */
1385        struct task_struct __rcu *real_parent; /* real parent process */
1386        struct task_struct __rcu *parent; /* recipient of SIGCHLD, wait4() reports */
1387        /*
1388         * children/sibling forms the list of my natural children
1389         */
1390        struct list_head children;      /* list of my children */
1391        struct list_head sibling;       /* linkage in my parent's children list */
1392        struct task_struct *group_leader;       /* threadgroup leader */
1393
1394        /*
1395         * ptraced is the list of tasks this task is using ptrace on.
1396         * This includes both natural children and PTRACE_ATTACH targets.
1397         * p->ptrace_entry is p's link on the p->parent->ptraced list.
1398         */
1399        struct list_head ptraced;
1400        struct list_head ptrace_entry;
1401
1402        /* PID/PID hash table linkage. */
1403        struct pid_link pids[PIDTYPE_MAX];
1404        struct list_head thread_group;
1405        struct list_head thread_node;
1406
1407        struct completion *vfork_done;          /* for vfork() */
1408        int __user *set_child_tid;              /* CLONE_CHILD_SETTID */
1409        int __user *clear_child_tid;            /* CLONE_CHILD_CLEARTID */
1410
1411        cputime_t utime, stime, utimescaled, stimescaled;
1412        cputime_t gtime;
1413#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
1414        struct cputime prev_cputime;
1415#endif
1416#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1417        seqlock_t vtime_seqlock;
1418        unsigned long long vtime_snap;
1419        enum {
1420                VTIME_SLEEPING = 0,
1421                VTIME_USER,
1422                VTIME_SYS,
1423        } vtime_snap_whence;
1424#endif
1425        unsigned long nvcsw, nivcsw; /* context switch counts */
1426        u64 start_time;         /* monotonic time in nsec */
1427        u64 real_start_time;    /* boot based time in nsec */
1428/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
1429        unsigned long min_flt, maj_flt;
1430
1431        struct task_cputime cputime_expires;
1432        struct list_head cpu_timers[3];
1433
1434/* process credentials */
1435        const struct cred __rcu *real_cred; /* objective and real subjective task
1436                                         * credentials (COW) */
1437        const struct cred __rcu *cred;  /* effective (overridable) subjective task
1438                                         * credentials (COW) */
1439        char comm[TASK_COMM_LEN]; /* executable name excluding path
1440                                     - access with [gs]et_task_comm (which lock
1441                                       it with task_lock())
1442                                     - initialized normally by setup_new_exec */
1443/* file system info */
1444        int link_count, total_link_count;
1445#ifdef CONFIG_SYSVIPC
1446/* ipc stuff */
1447        struct sysv_sem sysvsem;
1448        struct sysv_shm sysvshm;
1449#endif
1450#ifdef CONFIG_DETECT_HUNG_TASK
1451/* hung task detection */
1452        unsigned long last_switch_count;
1453#endif
1454/* CPU-specific state of this task */
1455        struct thread_struct thread;
1456/* filesystem information */
1457        struct fs_struct *fs;
1458/* open file information */
1459        struct files_struct *files;
1460/* namespaces */
1461        struct nsproxy *nsproxy;
1462/* signal handlers */
1463        struct signal_struct *signal;
1464        struct sighand_struct *sighand;
1465
1466        sigset_t blocked, real_blocked;
1467        sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
1468        struct sigpending pending;
1469
1470        unsigned long sas_ss_sp;
1471        size_t sas_ss_size;
1472        int (*notifier)(void *priv);
1473        void *notifier_data;
1474        sigset_t *notifier_mask;
1475        struct callback_head *task_works;
1476
1477        struct audit_context *audit_context;
1478#ifdef CONFIG_AUDITSYSCALL
1479        kuid_t loginuid;
1480        unsigned int sessionid;
1481#endif
1482        struct seccomp seccomp;
1483
1484/* Thread group tracking */
1485        u32 parent_exec_id;
1486        u32 self_exec_id;
1487/* Protection of (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed,
1488 * mempolicy */
1489        spinlock_t alloc_lock;
1490
1491        /* Protection of the PI data structures: */
1492        raw_spinlock_t pi_lock;
1493
1494#ifdef CONFIG_RT_MUTEXES
1495        /* PI waiters blocked on a rt_mutex held by this task */
1496        struct rb_root pi_waiters;
1497        struct rb_node *pi_waiters_leftmost;
1498        /* Deadlock detection and priority inheritance handling */
1499        struct rt_mutex_waiter *pi_blocked_on;
1500#endif
1501
1502#ifdef CONFIG_DEBUG_MUTEXES
1503        /* mutex deadlock detection */
1504        struct mutex_waiter *blocked_on;
1505#endif
1506#ifdef CONFIG_TRACE_IRQFLAGS
1507        unsigned int irq_events;
1508        unsigned long hardirq_enable_ip;
1509        unsigned long hardirq_disable_ip;
1510        unsigned int hardirq_enable_event;
1511        unsigned int hardirq_disable_event;
1512        int hardirqs_enabled;
1513        int hardirq_context;
1514        unsigned long softirq_disable_ip;
1515        unsigned long softirq_enable_ip;
1516        unsigned int softirq_disable_event;
1517        unsigned int softirq_enable_event;
1518        int softirqs_enabled;
1519        int softirq_context;
1520#endif
1521#ifdef CONFIG_LOCKDEP
1522# define MAX_LOCK_DEPTH 48UL
1523        u64 curr_chain_key;
1524        int lockdep_depth;
1525        unsigned int lockdep_recursion;
1526        struct held_lock held_locks[MAX_LOCK_DEPTH];
1527        gfp_t lockdep_reclaim_gfp;
1528#endif
1529
1530/* journalling filesystem info */
1531        void *journal_info;
1532
1533/* stacked block device info */
1534        struct bio_list *bio_list;
1535
1536#ifdef CONFIG_BLOCK
1537/* stack plugging */
1538        struct blk_plug *plug;
1539#endif
1540
1541/* VM state */
1542        struct reclaim_state *reclaim_state;
1543
1544        struct backing_dev_info *backing_dev_info;
1545
1546        struct io_context *io_context;
1547
1548        unsigned long ptrace_message;
1549        siginfo_t *last_siginfo; /* For ptrace use.  */
1550        struct task_io_accounting ioac;
1551#if defined(CONFIG_TASK_XACCT)
1552        u64 acct_rss_mem1;      /* accumulated rss usage */
1553        u64 acct_vm_mem1;       /* accumulated virtual memory usage */
1554        cputime_t acct_timexpd; /* stime + utime since last update */
1555#endif
1556#ifdef CONFIG_CPUSETS
1557        nodemask_t mems_allowed;        /* Protected by alloc_lock */
1558        seqcount_t mems_allowed_seq;    /* Seqence no to catch updates */
1559        int cpuset_mem_spread_rotor;
1560        int cpuset_slab_spread_rotor;
1561#endif
1562#ifdef CONFIG_CGROUPS
1563        /* Control Group info protected by css_set_lock */
1564        struct css_set __rcu *cgroups;
1565        /* cg_list protected by css_set_lock and tsk->alloc_lock */
1566        struct list_head cg_list;
1567#endif
1568#ifdef CONFIG_FUTEX
1569        struct robust_list_head __user *robust_list;
1570#ifdef CONFIG_COMPAT
1571        struct compat_robust_list_head __user *compat_robust_list;
1572#endif
1573        struct list_head pi_state_list;
1574        struct futex_pi_state *pi_state_cache;
1575#endif
1576#ifdef CONFIG_PERF_EVENTS
1577        struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
1578        struct mutex perf_event_mutex;
1579        struct list_head perf_event_list;
1580#endif
1581#ifdef CONFIG_DEBUG_PREEMPT
1582        unsigned long preempt_disable_ip;
1583#endif
1584#ifdef CONFIG_NUMA
1585        struct mempolicy *mempolicy;    /* Protected by alloc_lock */
1586        short il_next;
1587        short pref_node_fork;
1588#endif
1589#ifdef CONFIG_NUMA_BALANCING
1590        int numa_scan_seq;
1591        unsigned int numa_scan_period;
1592        unsigned int numa_scan_period_max;
1593        int numa_preferred_nid;
1594        unsigned long numa_migrate_retry;
1595        u64 node_stamp;                 /* migration stamp  */
1596        u64 last_task_numa_placement;
1597        u64 last_sum_exec_runtime;
1598        struct callback_head numa_work;
1599
1600        struct list_head numa_entry;
1601        struct numa_group *numa_group;
1602
1603        /*
1604         * numa_faults is an array split into four regions:
1605         * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
1606         * in this precise order.
1607         *
1608         * faults_memory: Exponential decaying average of faults on a per-node
1609         * basis. Scheduling placement decisions are made based on these
1610         * counts. The values remain static for the duration of a PTE scan.
1611         * faults_cpu: Track the nodes the process was running on when a NUMA
1612         * hinting fault was incurred.
1613         * faults_memory_buffer and faults_cpu_buffer: Record faults per node
1614         * during the current scan window. When the scan completes, the counts
1615         * in faults_memory and faults_cpu decay and these values are copied.
1616         */
1617        unsigned long *numa_faults;
1618        unsigned long total_numa_faults;
1619
1620        /*
1621         * numa_faults_locality tracks if faults recorded during the last
1622         * scan window were remote/local. The task scan period is adapted
1623         * based on the locality of the faults with different weights
1624         * depending on whether they were shared or private faults
1625         */
1626        unsigned long numa_faults_locality[2];
1627
1628        unsigned long numa_pages_migrated;
1629#endif /* CONFIG_NUMA_BALANCING */
1630
1631        struct rcu_head rcu;
1632
1633        /*
1634         * cache last used pipe for splice
1635         */
1636        struct pipe_inode_info *splice_pipe;
1637
1638        struct page_frag task_frag;
1639
1640#ifdef  CONFIG_TASK_DELAY_ACCT
1641        struct task_delay_info *delays;
1642#endif
1643#ifdef CONFIG_FAULT_INJECTION
1644        int make_it_fail;
1645#endif
1646        /*
1647         * when (nr_dirtied >= nr_dirtied_pause), it's time to call
1648         * balance_dirty_pages() for some dirty throttling pause
1649         */
1650        int nr_dirtied;
1651        int nr_dirtied_pause;
1652        unsigned long dirty_paused_when; /* start of a write-and-pause period */
1653
1654#ifdef CONFIG_LATENCYTOP
1655        int latency_record_count;
1656        struct latency_record latency_record[LT_SAVECOUNT];
1657#endif
1658        /*
1659         * time slack values; these are used to round up poll() and
1660         * select() etc timeout values. These are in nanoseconds.
1661         */
1662        unsigned long timer_slack_ns;
1663        unsigned long default_timer_slack_ns;
1664
1665#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1666        /* Index of current stored address in ret_stack */
1667        int curr_ret_stack;
1668        /* Stack of return addresses for return function tracing */
1669        struct ftrace_ret_stack *ret_stack;
1670        /* time stamp for last schedule */
1671        unsigned long long ftrace_timestamp;
1672        /*
1673         * Number of functions that haven't been traced
1674         * because of depth overrun.
1675         */
1676        atomic_t trace_overrun;
1677        /* Pause for the tracing */
1678        atomic_t tracing_graph_pause;
1679#endif
1680#ifdef CONFIG_TRACING
1681        /* state flags for use by tracers */
1682        unsigned long trace;
1683        /* bitmask and counter of trace recursion */
1684        unsigned long trace_recursion;
1685#endif /* CONFIG_TRACING */
1686#ifdef CONFIG_MEMCG
1687        struct memcg_oom_info {
1688                struct mem_cgroup *memcg;
1689                gfp_t gfp_mask;
1690                int order;
1691                unsigned int may_oom:1;
1692        } memcg_oom;
1693#endif
1694#ifdef CONFIG_UPROBES
1695        struct uprobe_task *utask;
1696#endif
1697#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1698        unsigned int    sequential_io;
1699        unsigned int    sequential_io_avg;
1700#endif
1701#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1702        unsigned long   task_state_change;
1703#endif
1704};
1705
1706/* Future-safe accessor for struct task_struct's cpus_allowed. */
1707#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
1708
1709#define TNF_MIGRATED    0x01
1710#define TNF_NO_GROUP    0x02
1711#define TNF_SHARED      0x04
1712#define TNF_FAULT_LOCAL 0x08
1713
1714#ifdef CONFIG_NUMA_BALANCING
1715extern void task_numa_fault(int last_node, int node, int pages, int flags);
1716extern pid_t task_numa_group_id(struct task_struct *p);
1717extern void set_numabalancing_state(bool enabled);
1718extern void task_numa_free(struct task_struct *p);
1719extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
1720                                        int src_nid, int dst_cpu);
1721#else
1722static inline void task_numa_fault(int last_node, int node, int pages,
1723                                   int flags)
1724{
1725}
1726static inline pid_t task_numa_group_id(struct task_struct *p)
1727{
1728        return 0;
1729}
1730static inline void set_numabalancing_state(bool enabled)
1731{
1732}
1733static inline void task_numa_free(struct task_struct *p)
1734{
1735}
1736static inline bool should_numa_migrate_memory(struct task_struct *p,
1737                                struct page *page, int src_nid, int dst_cpu)
1738{
1739        return true;
1740}
1741#endif
1742
1743static inline struct pid *task_pid(struct task_struct *task)
1744{
1745        return task->pids[PIDTYPE_PID].pid;
1746}
1747
1748static inline struct pid *task_tgid(struct task_struct *task)
1749{
1750        return task->group_leader->pids[PIDTYPE_PID].pid;
1751}
1752
1753/*
1754 * Without tasklist or rcu lock it is not safe to dereference
1755 * the result of task_pgrp/task_session even if task == current,
1756 * we can race with another thread doing sys_setsid/sys_setpgid.
1757 */
1758static inline struct pid *task_pgrp(struct task_struct *task)
1759{
1760        return task->group_leader->pids[PIDTYPE_PGID].pid;
1761}
1762
1763static inline struct pid *task_session(struct task_struct *task)
1764{
1765        return task->group_leader->pids[PIDTYPE_SID].pid;
1766}
1767
1768struct pid_namespace;
1769
1770/*
1771 * the helpers to get the task's different pids as they are seen
1772 * from various namespaces
1773 *
1774 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
1775 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
1776 *                     current.
1777 * task_xid_nr_ns()  : id seen from the ns specified;
1778 *
1779 * set_task_vxid()   : assigns a virtual id to a task;
1780 *
1781 * see also pid_nr() etc in include/linux/pid.h
1782 */
1783pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
1784                        struct pid_namespace *ns);
1785
1786static inline pid_t task_pid_nr(struct task_struct *tsk)
1787{
1788        return tsk->pid;
1789}
1790
1791static inline pid_t task_pid_nr_ns(struct task_struct *tsk,
1792                                        struct pid_namespace *ns)
1793{
1794        return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1795}
1796
1797static inline pid_t task_pid_vnr(struct task_struct *tsk)
1798{
1799        return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1800}
1801
1802
1803static inline pid_t task_tgid_nr(struct task_struct *tsk)
1804{
1805        return tsk->tgid;
1806}
1807
1808pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
1809
1810static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1811{
1812        return pid_vnr(task_tgid(tsk));
1813}
1814
1815
1816static inline int pid_alive(const struct task_struct *p);
1817static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1818{
1819        pid_t pid = 0;
1820
1821        rcu_read_lock();
1822        if (pid_alive(tsk))
1823                pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1824        rcu_read_unlock();
1825
1826        return pid;
1827}
1828
1829static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1830{
1831        return task_ppid_nr_ns(tsk, &init_pid_ns);
1832}
1833
1834static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
1835                                        struct pid_namespace *ns)
1836{
1837        return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1838}
1839
1840static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1841{
1842        return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1843}
1844
1845
1846static inline pid_t task_session_nr_ns(struct task_struct *tsk,
1847                                        struct pid_namespace *ns)
1848{
1849        return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1850}
1851
1852static inline pid_t task_session_vnr(struct task_struct *tsk)
1853{
1854        return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1855}
1856
1857/* obsolete, do not use */
1858static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1859{
1860        return task_pgrp_nr_ns(tsk, &init_pid_ns);
1861}
1862
1863/**
1864 * pid_alive - check that a task structure is not stale
1865 * @p: Task structure to be checked.
1866 *
1867 * Test if a process is not yet dead (at most zombie state)
1868 * If pid_alive fails, then pointers within the task structure
1869 * can be stale and must not be dereferenced.
1870 *
1871 * Return: 1 if the process is alive. 0 otherwise.
1872 */
1873static inline int pid_alive(const struct task_struct *p)
1874{
1875        return p->pids[PIDTYPE_PID].pid != NULL;
1876}
1877
1878/**
1879 * is_global_init - check if a task structure is init
1880 * @tsk: Task structure to be checked.
1881 *
1882 * Check if a task structure is the first user space task the kernel created.
1883 *
1884 * Return: 1 if the task structure is init. 0 otherwise.
1885 */
1886static inline int is_global_init(struct task_struct *tsk)
1887{
1888        return tsk->pid == 1;
1889}
1890
1891extern struct pid *cad_pid;
1892
1893extern void free_task(struct task_struct *tsk);
1894#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
1895
1896extern void __put_task_struct(struct task_struct *t);
1897
1898static inline void put_task_struct(struct task_struct *t)
1899{
1900        if (atomic_dec_and_test(&t->usage))
1901                __put_task_struct(t);
1902}
1903
1904#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
1905extern void task_cputime(struct task_struct *t,
1906                         cputime_t *utime, cputime_t *stime);
1907extern void task_cputime_scaled(struct task_struct *t,
1908                                cputime_t *utimescaled, cputime_t *stimescaled);
1909extern cputime_t task_gtime(struct task_struct *t);
1910#else
1911static inline void task_cputime(struct task_struct *t,
1912                                cputime_t *utime, cputime_t *stime)
1913{
1914        if (utime)
1915                *utime = t->utime;
1916        if (stime)
1917                *stime = t->stime;
1918}
1919
1920static inline void task_cputime_scaled(struct task_struct *t,
1921                                       cputime_t *utimescaled,
1922                                       cputime_t *stimescaled)
1923{
1924        if (utimescaled)
1925                *utimescaled = t->utimescaled;
1926        if (stimescaled)
1927                *stimescaled = t->stimescaled;
1928}
1929
1930static inline cputime_t task_gtime(struct task_struct *t)
1931{
1932        return t->gtime;
1933}
1934#endif
1935extern void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
1936extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st);
1937
1938/*
1939 * Per process flags
1940 */
1941#define PF_EXITING      0x00000004      /* getting shut down */
1942#define PF_EXITPIDONE   0x00000008      /* pi exit done on shut down */
1943#define PF_VCPU         0x00000010      /* I'm a virtual CPU */
1944#define PF_WQ_WORKER    0x00000020      /* I'm a workqueue worker */
1945#define PF_FORKNOEXEC   0x00000040      /* forked but didn't exec */
1946#define PF_MCE_PROCESS  0x00000080      /* process policy on mce errors */
1947#define PF_SUPERPRIV    0x00000100      /* used super-user privileges */
1948#define PF_DUMPCORE     0x00000200      /* dumped core */
1949#define PF_SIGNALED     0x00000400      /* killed by a signal */
1950#define PF_MEMALLOC     0x00000800      /* Allocating memory */
1951#define PF_NPROC_EXCEEDED 0x00001000    /* set_user noticed that RLIMIT_NPROC was exceeded */
1952#define PF_USED_MATH    0x00002000      /* if unset the fpu must be initialized before use */
1953#define PF_USED_ASYNC   0x00004000      /* used async_schedule*(), used by module init */
1954#define PF_NOFREEZE     0x00008000      /* this thread should not be frozen */
1955#define PF_FROZEN       0x00010000      /* frozen for system suspend */
1956#define PF_FSTRANS      0x00020000      /* inside a filesystem transaction */
1957#define PF_KSWAPD       0x00040000      /* I am kswapd */
1958#define PF_MEMALLOC_NOIO 0x00080000     /* Allocating memory without IO involved */
1959#define PF_LESS_THROTTLE 0x00100000     /* Throttle me less: I clean memory */
1960#define PF_KTHREAD      0x00200000      /* I am a kernel thread */
1961#define PF_RANDOMIZE    0x00400000      /* randomize virtual address space */
1962#define PF_SWAPWRITE    0x00800000      /* Allowed to write to swap */
1963#define PF_NO_SETAFFINITY 0x04000000    /* Userland is not allowed to meddle with cpus_allowed */
1964#define PF_MCE_EARLY    0x08000000      /* Early kill for mce process policy */
1965#define PF_MUTEX_TESTER 0x20000000      /* Thread belongs to the rt mutex tester */
1966#define PF_FREEZER_SKIP 0x40000000      /* Freezer should not count it as freezable */
1967#define PF_SUSPEND_TASK 0x80000000      /* this thread called freeze_processes and should not be frozen */
1968
1969/*
1970 * Only the _current_ task can read/write to tsk->flags, but other
1971 * tasks can access tsk->flags in readonly mode for example
1972 * with tsk_used_math (like during threaded core dumping).
1973 * There is however an exception to this rule during ptrace
1974 * or during fork: the ptracer task is allowed to write to the
1975 * child->flags of its traced child (same goes for fork, the parent
1976 * can write to the child->flags), because we're guaranteed the
1977 * child is not running and in turn not changing child->flags
1978 * at the same time the parent does it.
1979 */
1980#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1981#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1982#define clear_used_math() clear_stopped_child_used_math(current)
1983#define set_used_math() set_stopped_child_used_math(current)
1984#define conditional_stopped_child_used_math(condition, child) \
1985        do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1986#define conditional_used_math(condition) \
1987        conditional_stopped_child_used_math(condition, current)
1988#define copy_to_stopped_child_used_math(child) \
1989        do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1990/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
1991#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1992#define used_math() tsk_used_math(current)
1993
1994/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags
1995 * __GFP_FS is also cleared as it implies __GFP_IO.
1996 */
1997static inline gfp_t memalloc_noio_flags(gfp_t flags)
1998{
1999        if (unlikely(current->flags & PF_MEMALLOC_NOIO))
2000                flags &= ~(__GFP_IO | __GFP_FS);
2001        return flags;
2002}
2003
2004static inline unsigned int memalloc_noio_save(void)
2005{
2006        unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
2007        current->flags |= PF_MEMALLOC_NOIO;
2008        return flags;
2009}
2010
2011static inline void memalloc_noio_restore(unsigned int flags)
2012{
2013        current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
2014}
2015
2016/* Per-process atomic flags. */
2017#define PFA_NO_NEW_PRIVS 0      /* May not gain new privileges. */
2018#define PFA_SPREAD_PAGE  1      /* Spread page cache over cpuset */
2019#define PFA_SPREAD_SLAB  2      /* Spread some slab caches over cpuset */
2020
2021
2022#define TASK_PFA_TEST(name, func)                                       \
2023        static inline bool task_##func(struct task_struct *p)           \
2024        { return test_bit(PFA_##name, &p->atomic_flags); }
2025#define TASK_PFA_SET(name, func)                                        \
2026        static inline void task_set_##func(struct task_struct *p)       \
2027        { set_bit(PFA_##name, &p->atomic_flags); }
2028#define TASK_PFA_CLEAR(name, func)                                      \
2029        static inline void task_clear_##func(struct task_struct *p)     \
2030        { clear_bit(PFA_##name, &p->atomic_flags); }
2031
2032TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
2033TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
2034
2035TASK_PFA_TEST(SPREAD_PAGE, spread_page)
2036TASK_PFA_SET(SPREAD_PAGE, spread_page)
2037TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
2038
2039TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
2040TASK_PFA_SET(SPREAD_SLAB, spread_slab)
2041TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
2042
2043/*
2044 * task->jobctl flags
2045 */
2046#define JOBCTL_STOP_SIGMASK     0xffff  /* signr of the last group stop */
2047
2048#define JOBCTL_STOP_DEQUEUED_BIT 16     /* stop signal dequeued */
2049#define JOBCTL_STOP_PENDING_BIT 17      /* task should stop for group stop */
2050#define JOBCTL_STOP_CONSUME_BIT 18      /* consume group stop count */
2051#define JOBCTL_TRAP_STOP_BIT    19      /* trap for STOP */
2052#define JOBCTL_TRAP_NOTIFY_BIT  20      /* trap for NOTIFY */
2053#define JOBCTL_TRAPPING_BIT     21      /* switching to TRACED */
2054#define JOBCTL_LISTENING_BIT    22      /* ptracer is listening for events */
2055
2056#define JOBCTL_STOP_DEQUEUED    (1 << JOBCTL_STOP_DEQUEUED_BIT)
2057#define JOBCTL_STOP_PENDING     (1 << JOBCTL_STOP_PENDING_BIT)
2058#define JOBCTL_STOP_CONSUME     (1 << JOBCTL_STOP_CONSUME_BIT)
2059#define JOBCTL_TRAP_STOP        (1 << JOBCTL_TRAP_STOP_BIT)
2060#define JOBCTL_TRAP_NOTIFY      (1 << JOBCTL_TRAP_NOTIFY_BIT)
2061#define JOBCTL_TRAPPING         (1 << JOBCTL_TRAPPING_BIT)
2062#define JOBCTL_LISTENING        (1 << JOBCTL_LISTENING_BIT)
2063
2064#define JOBCTL_TRAP_MASK        (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
2065#define JOBCTL_PENDING_MASK     (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
2066
2067extern bool task_set_jobctl_pending(struct task_struct *task,
2068                                    unsigned int mask);
2069extern void task_clear_jobctl_trapping(struct task_struct *task);
2070extern void task_clear_jobctl_pending(struct task_struct *task,
2071                                      unsigned int mask);
2072
2073static inline void rcu_copy_process(struct task_struct *p)
2074{
2075#ifdef CONFIG_PREEMPT_RCU
2076        p->rcu_read_lock_nesting = 0;
2077        p->rcu_read_unlock_special.s = 0;
2078        p->rcu_blocked_node = NULL;
2079        INIT_LIST_HEAD(&p->rcu_node_entry);
2080#endif /* #ifdef CONFIG_PREEMPT_RCU */
2081#ifdef CONFIG_TASKS_RCU
2082        p->rcu_tasks_holdout = false;
2083        INIT_LIST_HEAD(&p->rcu_tasks_holdout_list);
2084        p->rcu_tasks_idle_cpu = -1;
2085#endif /* #ifdef CONFIG_TASKS_RCU */
2086}
2087
2088static inline void tsk_restore_flags(struct task_struct *task,
2089                                unsigned long orig_flags, unsigned long flags)
2090{
2091        task->flags &= ~flags;
2092        task->flags |= orig_flags & flags;
2093}
2094
2095extern int cpuset_cpumask_can_shrink(const struct cpumask *cur,
2096                                     const struct cpumask *trial);
2097extern int task_can_attach(struct task_struct *p,
2098                           const struct cpumask *cs_cpus_allowed);
2099#ifdef CONFIG_SMP
2100extern void do_set_cpus_allowed(struct task_struct *p,
2101                               const struct cpumask *new_mask);
2102
2103extern int set_cpus_allowed_ptr(struct task_struct *p,
2104                                const struct cpumask *new_mask);
2105#else
2106static inline void do_set_cpus_allowed(struct task_struct *p,
2107                                      const struct cpumask *new_mask)
2108{
2109}
2110static inline int set_cpus_allowed_ptr(struct task_struct *p,
2111                                       const struct cpumask *new_mask)
2112{
2113        if (!cpumask_test_cpu(0, new_mask))
2114                return -EINVAL;
2115        return 0;
2116}
2117#endif
2118
2119#ifdef CONFIG_NO_HZ_COMMON
2120void calc_load_enter_idle(void);
2121void calc_load_exit_idle(void);
2122#else
2123static inline void calc_load_enter_idle(void) { }
2124static inline void calc_load_exit_idle(void) { }
2125#endif /* CONFIG_NO_HZ_COMMON */
2126
2127#ifndef CONFIG_CPUMASK_OFFSTACK
2128static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
2129{
2130        return set_cpus_allowed_ptr(p, &new_mask);
2131}
2132#endif
2133
2134/*
2135 * Do not use outside of architecture code which knows its limitations.
2136 *
2137 * sched_clock() has no promise of monotonicity or bounded drift between
2138 * CPUs, use (which you should not) requires disabling IRQs.
2139 *
2140 * Please use one of the three interfaces below.
2141 */
2142extern unsigned long long notrace sched_clock(void);
2143/*
2144 * See the comment in kernel/sched/clock.c
2145 */
2146extern u64 cpu_clock(int cpu);
2147extern u64 local_clock(void);
2148extern u64 sched_clock_cpu(int cpu);
2149
2150
2151extern void sched_clock_init(void);
2152
2153#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2154static inline void sched_clock_tick(void)
2155{
2156}
2157
2158static inline void sched_clock_idle_sleep_event(void)
2159{
2160}
2161
2162static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
2163{
2164}
2165#else
2166/*
2167 * Architectures can set this to 1 if they have specified
2168 * CONFIG_HAVE_UNSTABLE_SCHED_CLOCK in their arch Kconfig,
2169 * but then during bootup it turns out that sched_clock()
2170 * is reliable after all:
2171 */
2172extern int sched_clock_stable(void);
2173extern void set_sched_clock_stable(void);
2174extern void clear_sched_clock_stable(void);
2175
2176extern void sched_clock_tick(void);
2177extern void sched_clock_idle_sleep_event(void);
2178extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2179#endif
2180
2181#ifdef CONFIG_IRQ_TIME_ACCOUNTING
2182/*
2183 * An i/f to runtime opt-in for irq time accounting based off of sched_clock.
2184 * The reason for this explicit opt-in is not to have perf penalty with
2185 * slow sched_clocks.
2186 */
2187extern void enable_sched_clock_irqtime(void);
2188extern void disable_sched_clock_irqtime(void);
2189#else
2190static inline void enable_sched_clock_irqtime(void) {}
2191static inline void disable_sched_clock_irqtime(void) {}
2192#endif
2193
2194extern unsigned long long
2195task_sched_runtime(struct task_struct *task);
2196
2197/* sched_exec is called by processes performing an exec */
2198#ifdef CONFIG_SMP
2199extern void sched_exec(void);
2200#else
2201#define sched_exec()   {}
2202#endif
2203
2204extern void sched_clock_idle_sleep_event(void);
2205extern void sched_clock_idle_wakeup_event(u64 delta_ns);
2206
2207#ifdef CONFIG_HOTPLUG_CPU
2208extern void idle_task_exit(void);
2209#else
2210static inline void idle_task_exit(void) {}
2211#endif
2212
2213#if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
2214extern void wake_up_nohz_cpu(int cpu);
2215#else
2216static inline void wake_up_nohz_cpu(int cpu) { }
2217#endif
2218
2219#ifdef CONFIG_NO_HZ_FULL
2220extern bool sched_can_stop_tick(void);
2221extern u64 scheduler_tick_max_deferment(void);
2222#else
2223static inline bool sched_can_stop_tick(void) { return false; }
2224#endif
2225
2226#ifdef CONFIG_SCHED_AUTOGROUP
2227extern void sched_autogroup_create_attach(struct task_struct *p);
2228extern void sched_autogroup_detach(struct task_struct *p);
2229extern void sched_autogroup_fork(struct signal_struct *sig);
2230extern void sched_autogroup_exit(struct signal_struct *sig);
2231#ifdef CONFIG_PROC_FS
2232extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
2233extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
2234#endif
2235#else
2236static inline void sched_autogroup_create_attach(struct task_struct *p) { }
2237static inline void sched_autogroup_detach(struct task_struct *p) { }
2238static inline void sched_autogroup_fork(struct signal_struct *sig) { }
2239static inline void sched_autogroup_exit(struct signal_struct *sig) { }
2240#endif
2241
2242extern int yield_to(struct task_struct *p, bool preempt);
2243extern void set_user_nice(struct task_struct *p, long nice);
2244extern int task_prio(const struct task_struct *p);
2245/**
2246 * task_nice - return the nice value of a given task.
2247 * @p: the task in question.
2248 *
2249 * Return: The nice value [ -20 ... 0 ... 19 ].
2250 */
2251static inline int task_nice(const struct task_struct *p)
2252{
2253        return PRIO_TO_NICE((p)->static_prio);
2254}
2255extern int can_nice(const struct task_struct *p, const int nice);
2256extern int task_curr(const struct task_struct *p);
2257extern int idle_cpu(int cpu);
2258extern int sched_setscheduler(struct task_struct *, int,
2259                              const struct sched_param *);
2260extern int sched_setscheduler_nocheck(struct task_struct *, int,
2261                                      const struct sched_param *);
2262extern int sched_setattr(struct task_struct *,
2263                         const struct sched_attr *);
2264extern struct task_struct *idle_task(int cpu);
2265/**
2266 * is_idle_task - is the specified task an idle task?
2267 * @p: the task in question.
2268 *
2269 * Return: 1 if @p is an idle task. 0 otherwise.
2270 */
2271static inline bool is_idle_task(const struct task_struct *p)
2272{
2273        return p->pid == 0;
2274}
2275extern struct task_struct *curr_task(int cpu);
2276extern void set_curr_task(int cpu, struct task_struct *p);
2277
2278void yield(void);
2279
2280/*
2281 * The default (Linux) execution domain.
2282 */
2283extern struct exec_domain       default_exec_domain;
2284
2285union thread_union {
2286        struct thread_info thread_info;
2287        unsigned long stack[THREAD_SIZE/sizeof(long)];
2288};
2289
2290#ifndef __HAVE_ARCH_KSTACK_END
2291static inline int kstack_end(void *addr)
2292{
2293        /* Reliable end of stack detection:
2294         * Some APM bios versions misalign the stack
2295         */
2296        return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
2297}
2298#endif
2299
2300extern union thread_union init_thread_union;
2301extern struct task_struct init_task;
2302
2303extern struct   mm_struct init_mm;
2304
2305extern struct pid_namespace init_pid_ns;
2306
2307/*
2308 * find a task by one of its numerical ids
2309 *
2310 * find_task_by_pid_ns():
2311 *      finds a task by its pid in the specified namespace
2312 * find_task_by_vpid():
2313 *      finds a task by its virtual pid
2314 *
2315 * see also find_vpid() etc in include/linux/pid.h
2316 */
2317
2318extern struct task_struct *find_task_by_vpid(pid_t nr);
2319extern struct task_struct *find_task_by_pid_ns(pid_t nr,
2320                struct pid_namespace *ns);
2321
2322/* per-UID process charging. */
2323extern struct user_struct * alloc_uid(kuid_t);
2324static inline struct user_struct *get_uid(struct user_struct *u)
2325{
2326        atomic_inc(&u->__count);
2327        return u;
2328}
2329extern void free_uid(struct user_struct *);
2330
2331#include <asm/current.h>
2332
2333extern void xtime_update(unsigned long ticks);
2334
2335extern int wake_up_state(struct task_struct *tsk, unsigned int state);
2336extern int wake_up_process(struct task_struct *tsk);
2337extern void wake_up_new_task(struct task_struct *tsk);
2338#ifdef CONFIG_SMP
2339 extern void kick_process(struct task_struct *tsk);
2340#else
2341 static inline void kick_process(struct task_struct *tsk) { }
2342#endif
2343extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
2344extern void sched_dead(struct task_struct *p);
2345
2346extern void proc_caches_init(void);
2347extern void flush_signals(struct task_struct *);
2348extern void __flush_signals(struct task_struct *);
2349extern void ignore_signals(struct task_struct *);
2350extern void flush_signal_handlers(struct task_struct *, int force_default);
2351extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
2352
2353static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
2354{
2355        unsigned long flags;
2356        int ret;
2357
2358        spin_lock_irqsave(&tsk->sighand->siglock, flags);
2359        ret = dequeue_signal(tsk, mask, info);
2360        spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
2361
2362        return ret;
2363}
2364
2365extern void block_all_signals(int (*notifier)(void *priv), void *priv,
2366                              sigset_t *mask);
2367extern void unblock_all_signals(void);
2368extern void release_task(struct task_struct * p);
2369extern int send_sig_info(int, struct siginfo *, struct task_struct *);
2370extern int force_sigsegv(int, struct task_struct *);
2371extern int force_sig_info(int, struct siginfo *, struct task_struct *);
2372extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
2373extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
2374extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
2375                                const struct cred *, u32);
2376extern int kill_pgrp(struct pid *pid, int sig, int priv);
2377extern int kill_pid(struct pid *pid, int sig, int priv);
2378extern int kill_proc_info(int, struct siginfo *, pid_t);
2379extern __must_check bool do_notify_parent(struct task_struct *, int);
2380extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
2381extern void force_sig(int, struct task_struct *);
2382extern int send_sig(int, struct task_struct *, int);
2383extern int zap_other_threads(struct task_struct *p);
2384extern struct sigqueue *sigqueue_alloc(void);
2385extern void sigqueue_free(struct sigqueue *);
2386extern int send_sigqueue(struct sigqueue *,  struct task_struct *, int group);
2387extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
2388
2389static inline void restore_saved_sigmask(void)
2390{
2391        if (test_and_clear_restore_sigmask())
2392                __set_current_blocked(&current->saved_sigmask);
2393}
2394
2395static inline sigset_t *sigmask_to_save(void)
2396{
2397        sigset_t *res = &current->blocked;
2398        if (unlikely(test_restore_sigmask()))
2399                res = &current->saved_sigmask;
2400        return res;
2401}
2402
2403static inline int kill_cad_pid(int sig, int priv)
2404{
2405        return kill_pid(cad_pid, sig, priv);
2406}
2407
2408/* These can be the second arg to send_sig_info/send_group_sig_info.  */
2409#define SEND_SIG_NOINFO ((struct siginfo *) 0)
2410#define SEND_SIG_PRIV   ((struct siginfo *) 1)
2411#define SEND_SIG_FORCED ((struct siginfo *) 2)
2412
2413/*
2414 * True if we are on the alternate signal stack.
2415 */
2416static inline int on_sig_stack(unsigned long sp)
2417{
2418#ifdef CONFIG_STACK_GROWSUP
2419        return sp >= current->sas_ss_sp &&
2420                sp - current->sas_ss_sp < current->sas_ss_size;
2421#else
2422        return sp > current->sas_ss_sp &&
2423                sp - current->sas_ss_sp <= current->sas_ss_size;
2424#endif
2425}
2426
2427static inline int sas_ss_flags(unsigned long sp)
2428{
2429        if (!current->sas_ss_size)
2430                return SS_DISABLE;
2431
2432        return on_sig_stack(sp) ? SS_ONSTACK : 0;
2433}
2434
2435static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
2436{
2437        if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && ! sas_ss_flags(sp))
2438#ifdef CONFIG_STACK_GROWSUP
2439                return current->sas_ss_sp;
2440#else
2441                return current->sas_ss_sp + current->sas_ss_size;
2442#endif
2443        return sp;
2444}
2445
2446/*
2447 * Routines for handling mm_structs
2448 */
2449extern struct mm_struct * mm_alloc(void);
2450
2451/* mmdrop drops the mm and the page tables */
2452extern void __mmdrop(struct mm_struct *);
2453static inline void mmdrop(struct mm_struct * mm)
2454{
2455        if (unlikely(atomic_dec_and_test(&mm->mm_count)))
2456                __mmdrop(mm);
2457}
2458
2459/* mmput gets rid of the mappings and all user-space */
2460extern void mmput(struct mm_struct *);
2461/* Grab a reference to a task's mm, if it is not already going away */
2462extern struct mm_struct *get_task_mm(struct task_struct *task);
2463/*
2464 * Grab a reference to a task's mm, if it is not already going away
2465 * and ptrace_may_access with the mode parameter passed to it
2466 * succeeds.
2467 */
2468extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
2469/* Remove the current tasks stale references to the old mm_struct */
2470extern void mm_release(struct task_struct *, struct mm_struct *);
2471
2472extern int copy_thread(unsigned long, unsigned long, unsigned long,
2473                        struct task_struct *);
2474extern void flush_thread(void);
2475extern void exit_thread(void);
2476
2477extern void exit_files(struct task_struct *);
2478extern void __cleanup_sighand(struct sighand_struct *);
2479
2480extern void exit_itimers(struct signal_struct *);
2481extern void flush_itimer_signals(void);
2482
2483extern void do_group_exit(int);
2484
2485extern int do_execve(struct filename *,
2486                     const char __user * const __user *,
2487                     const char __user * const __user *);
2488extern int do_execveat(int, struct filename *,
2489                       const char __user * const __user *,
2490                       const char __user * const __user *,
2491                       int);
2492extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
2493struct task_struct *fork_idle(int);
2494extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
2495
2496extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
2497static inline void set_task_comm(struct task_struct *tsk, const char *from)
2498{
2499        __set_task_comm(tsk, from, false);
2500}
2501extern char *get_task_comm(char *to, struct task_struct *tsk);
2502
2503#ifdef CONFIG_SMP
2504void scheduler_ipi(void);
2505extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
2506#else
2507static inline void scheduler_ipi(void) { }
2508static inline unsigned long wait_task_inactive(struct task_struct *p,
2509                                               long match_state)
2510{
2511        return 1;
2512}
2513#endif
2514
2515#define next_task(p) \
2516        list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
2517
2518#define for_each_process(p) \
2519        for (p = &init_task ; (p = next_task(p)) != &init_task ; )
2520
2521extern bool current_is_single_threaded(void);
2522
2523/*
2524 * Careful: do_each_thread/while_each_thread is a double loop so
2525 *          'break' will not work as expected - use goto instead.
2526 */
2527#define do_each_thread(g, t) \
2528        for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
2529
2530#define while_each_thread(g, t) \
2531        while ((t = next_thread(t)) != g)
2532
2533#define __for_each_thread(signal, t)    \
2534        list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)
2535
2536#define for_each_thread(p, t)           \
2537        __for_each_thread((p)->signal, t)
2538
2539/* Careful: this is a double loop, 'break' won't work as expected. */
2540#define for_each_process_thread(p, t)   \
2541        for_each_process(p) for_each_thread(p, t)
2542
2543static inline int get_nr_threads(struct task_struct *tsk)
2544{
2545        return tsk->signal->nr_threads;
2546}
2547
2548static inline bool thread_group_leader(struct task_struct *p)
2549{
2550        return p->exit_signal >= 0;
2551}
2552
2553/* Do to the insanities of de_thread it is possible for a process
2554 * to have the pid of the thread group leader without actually being
2555 * the thread group leader.  For iteration through the pids in proc
2556 * all we care about is that we have a task with the appropriate
2557 * pid, we don't actually care if we have the right task.
2558 */
2559static inline bool has_group_leader_pid(struct task_struct *p)
2560{
2561        return task_pid(p) == p->signal->leader_pid;
2562}
2563
2564static inline
2565bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
2566{
2567        return p1->signal == p2->signal;
2568}
2569
2570static inline struct task_struct *next_thread(const struct task_struct *p)
2571{
2572        return list_entry_rcu(p->thread_group.next,
2573                              struct task_struct, thread_group);
2574}
2575
2576static inline int thread_group_empty(struct task_struct *p)
2577{
2578        return list_empty(&p->thread_group);
2579}
2580
2581#define delay_group_leader(p) \
2582                (thread_group_leader(p) && !thread_group_empty(p))
2583
2584/*
2585 * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
2586 * subscriptions and synchronises with wait4().  Also used in procfs.  Also
2587 * pins the final release of task.io_context.  Also protects ->cpuset and
2588 * ->cgroup.subsys[]. And ->vfork_done.
2589 *
2590 * Nests both inside and outside of read_lock(&tasklist_lock).
2591 * It must not be nested with write_lock_irq(&tasklist_lock),
2592 * neither inside nor outside.
2593 */
2594static inline void task_lock(struct task_struct *p)
2595{
2596        spin_lock(&p->alloc_lock);
2597}
2598
2599static inline void task_unlock(struct task_struct *p)
2600{
2601        spin_unlock(&p->alloc_lock);
2602}
2603
2604extern struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
2605                                                        unsigned long *flags);
2606
2607static inline struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
2608                                                       unsigned long *flags)
2609{
2610        struct sighand_struct *ret;
2611
2612        ret = __lock_task_sighand(tsk, flags);
2613        (void)__cond_lock(&tsk->sighand->siglock, ret);
2614        return ret;
2615}
2616
2617static inline void unlock_task_sighand(struct task_struct *tsk,
2618                                                unsigned long *flags)
2619{
2620        spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
2621}
2622
2623#ifdef CONFIG_CGROUPS
2624static inline void threadgroup_change_begin(struct task_struct *tsk)
2625{
2626        down_read(&tsk->signal->group_rwsem);
2627}
2628static inline void threadgroup_change_end(struct task_struct *tsk)
2629{
2630        up_read(&tsk->signal->group_rwsem);
2631}
2632
2633/**
2634 * threadgroup_lock - lock threadgroup
2635 * @tsk: member task of the threadgroup to lock
2636 *
2637 * Lock the threadgroup @tsk belongs to.  No new task is allowed to enter
2638 * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
2639 * change ->group_leader/pid.  This is useful for cases where the threadgroup
2640 * needs to stay stable across blockable operations.
2641 *
2642 * fork and exit paths explicitly call threadgroup_change_{begin|end}() for
2643 * synchronization.  While held, no new task will be added to threadgroup
2644 * and no existing live task will have its PF_EXITING set.
2645 *
2646 * de_thread() does threadgroup_change_{begin|end}() when a non-leader
2647 * sub-thread becomes a new leader.
2648 */
2649static inline void threadgroup_lock(struct task_struct *tsk)
2650{
2651        down_write(&tsk->signal->group_rwsem);
2652}
2653
2654/**
2655 * threadgroup_unlock - unlock threadgroup
2656 * @tsk: member task of the threadgroup to unlock
2657 *
2658 * Reverse threadgroup_lock().
2659 */
2660static inline void threadgroup_unlock(struct task_struct *tsk)
2661{
2662        up_write(&tsk->signal->group_rwsem);
2663}
2664#else
2665static inline void threadgroup_change_begin(struct task_struct *tsk) {}
2666static inline void threadgroup_change_end(struct task_struct *tsk) {}
2667static inline void threadgroup_lock(struct task_struct *tsk) {}
2668static inline void threadgroup_unlock(struct task_struct *tsk) {}
2669#endif
2670
2671#ifndef __HAVE_THREAD_FUNCTIONS
2672
2673#define task_thread_info(task)  ((struct thread_info *)(task)->stack)
2674#define task_stack_page(task)   ((task)->stack)
2675
2676static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
2677{
2678        *task_thread_info(p) = *task_thread_info(org);
2679        task_thread_info(p)->task = p;
2680}
2681
2682/*
2683 * Return the address of the last usable long on the stack.
2684 *
2685 * When the stack grows down, this is just above the thread
2686 * info struct. Going any lower will corrupt the threadinfo.
2687 *
2688 * When the stack grows up, this is the highest address.
2689 * Beyond that position, we corrupt data on the next page.
2690 */
2691static inline unsigned long *end_of_stack(struct task_struct *p)
2692{
2693#ifdef CONFIG_STACK_GROWSUP
2694        return (unsigned long *)((unsigned long)task_thread_info(p) + THREAD_SIZE) - 1;
2695#else
2696        return (unsigned long *)(task_thread_info(p) + 1);
2697#endif
2698}
2699
2700#endif
2701#define task_stack_end_corrupted(task) \
2702                (*(end_of_stack(task)) != STACK_END_MAGIC)
2703
2704static inline int object_is_on_stack(void *obj)
2705{
2706        void *stack = task_stack_page(current);
2707
2708        return (obj >= stack) && (obj < (stack + THREAD_SIZE));
2709}
2710
2711extern void thread_info_cache_init(void);
2712
2713#ifdef CONFIG_DEBUG_STACK_USAGE
2714static inline unsigned long stack_not_used(struct task_struct *p)
2715{
2716        unsigned long *n = end_of_stack(p);
2717
2718        do {    /* Skip over canary */
2719                n++;
2720        } while (!*n);
2721
2722        return (unsigned long)n - (unsigned long)end_of_stack(p);
2723}
2724#endif
2725extern void set_task_stack_end_magic(struct task_struct *tsk);
2726
2727/* set thread flags in other task's structures
2728 * - see asm/thread_info.h for TIF_xxxx flags available
2729 */
2730static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
2731{
2732        set_ti_thread_flag(task_thread_info(tsk), flag);
2733}
2734
2735static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2736{
2737        clear_ti_thread_flag(task_thread_info(tsk), flag);
2738}
2739
2740static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
2741{
2742        return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
2743}
2744
2745static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
2746{
2747        return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
2748}
2749
2750static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
2751{
2752        return test_ti_thread_flag(task_thread_info(tsk), flag);
2753}
2754
2755static inline void set_tsk_need_resched(struct task_struct *tsk)
2756{
2757        set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2758}
2759
2760static inline void clear_tsk_need_resched(struct task_struct *tsk)
2761{
2762        clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
2763}
2764
2765static inline int test_tsk_need_resched(struct task_struct *tsk)
2766{
2767        return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
2768}
2769
2770static inline int restart_syscall(void)
2771{
2772        set_tsk_thread_flag(current, TIF_SIGPENDING);
2773        return -ERESTARTNOINTR;
2774}
2775
2776static inline int signal_pending(struct task_struct *p)
2777{
2778        return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
2779}
2780
2781static inline int __fatal_signal_pending(struct task_struct *p)
2782{
2783        return unlikely(sigismember(&p->pending.signal, SIGKILL));
2784}
2785
2786static inline int fatal_signal_pending(struct task_struct *p)
2787{
2788        return signal_pending(p) && __fatal_signal_pending(p);
2789}
2790
2791static inline int signal_pending_state(long state, struct task_struct *p)
2792{
2793        if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
2794                return 0;
2795        if (!signal_pending(p))
2796                return 0;
2797
2798        return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
2799}
2800
2801/*
2802 * cond_resched() and cond_resched_lock(): latency reduction via
2803 * explicit rescheduling in places that are safe. The return
2804 * value indicates whether a reschedule was done in fact.
2805 * cond_resched_lock() will drop the spinlock before scheduling,
2806 * cond_resched_softirq() will enable bhs before scheduling.
2807 */
2808extern int _cond_resched(void);
2809
2810#define cond_resched() ({                       \
2811        ___might_sleep(__FILE__, __LINE__, 0);  \
2812        _cond_resched();                        \
2813})
2814
2815extern int __cond_resched_lock(spinlock_t *lock);
2816
2817#ifdef CONFIG_PREEMPT_COUNT
2818#define PREEMPT_LOCK_OFFSET     PREEMPT_OFFSET
2819#else
2820#define PREEMPT_LOCK_OFFSET     0
2821#endif
2822
2823#define cond_resched_lock(lock) ({                              \
2824        ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
2825        __cond_resched_lock(lock);                              \
2826})
2827
2828extern int __cond_resched_softirq(void);
2829
2830#define cond_resched_softirq() ({                                       \
2831        ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);     \
2832        __cond_resched_softirq();                                       \
2833})
2834
2835static inline void cond_resched_rcu(void)
2836{
2837#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
2838        rcu_read_unlock();
2839        cond_resched();
2840        rcu_read_lock();
2841#endif
2842}
2843
2844/*
2845 * Does a critical section need to be broken due to another
2846 * task waiting?: (technically does not depend on CONFIG_PREEMPT,
2847 * but a general need for low latency)
2848 */
2849static inline int spin_needbreak(spinlock_t *lock)
2850{
2851#ifdef CONFIG_PREEMPT
2852        return spin_is_contended(lock);
2853#else
2854        return 0;
2855#endif
2856}
2857
2858/*
2859 * Idle thread specific functions to determine the need_resched
2860 * polling state.
2861 */
2862#ifdef TIF_POLLING_NRFLAG
2863static inline int tsk_is_polling(struct task_struct *p)
2864{
2865        return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
2866}
2867
2868static inline void __current_set_polling(void)
2869{
2870        set_thread_flag(TIF_POLLING_NRFLAG);
2871}
2872
2873static inline bool __must_check current_set_polling_and_test(void)
2874{
2875        __current_set_polling();
2876
2877        /*
2878         * Polling state must be visible before we test NEED_RESCHED,
2879         * paired by resched_curr()
2880         */
2881        smp_mb__after_atomic();
2882
2883        return unlikely(tif_need_resched());
2884}
2885
2886static inline void __current_clr_polling(void)
2887{
2888        clear_thread_flag(TIF_POLLING_NRFLAG);
2889}
2890
2891static inline bool __must_check current_clr_polling_and_test(void)
2892{
2893        __current_clr_polling();
2894
2895        /*
2896         * Polling state must be visible before we test NEED_RESCHED,
2897         * paired by resched_curr()
2898         */
2899        smp_mb__after_atomic();
2900
2901        return unlikely(tif_need_resched());
2902}
2903
2904#else
2905static inline int tsk_is_polling(struct task_struct *p) { return 0; }
2906static inline void __current_set_polling(void) { }
2907static inline void __current_clr_polling(void) { }
2908
2909static inline bool __must_check current_set_polling_and_test(void)
2910{
2911        return unlikely(tif_need_resched());
2912}
2913static inline bool __must_check current_clr_polling_and_test(void)
2914{
2915        return unlikely(tif_need_resched());
2916}
2917#endif
2918
2919static inline void current_clr_polling(void)
2920{
2921        __current_clr_polling();
2922
2923        /*
2924         * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
2925         * Once the bit is cleared, we'll get IPIs with every new
2926         * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
2927         * fold.
2928         */
2929        smp_mb(); /* paired with resched_curr() */
2930
2931        preempt_fold_need_resched();
2932}
2933
2934static __always_inline bool need_resched(void)
2935{
2936        return unlikely(tif_need_resched());
2937}
2938
2939/*
2940 * Thread group CPU time accounting.
2941 */
2942void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
2943void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
2944
2945static inline void thread_group_cputime_init(struct signal_struct *sig)
2946{
2947        raw_spin_lock_init(&sig->cputimer.lock);
2948}
2949
2950/*
2951 * Reevaluate whether the task has signals pending delivery.
2952 * Wake the task if so.
2953 * This is required every time the blocked sigset_t changes.
2954 * callers must hold sighand->siglock.
2955 */
2956extern void recalc_sigpending_and_wake(struct task_struct *t);
2957extern void recalc_sigpending(void);
2958
2959extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
2960
2961static inline void signal_wake_up(struct task_struct *t, bool resume)
2962{
2963        signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
2964}
2965static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
2966{
2967        signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
2968}
2969
2970/*
2971 * Wrappers for p->thread_info->cpu access. No-op on UP.
2972 */
2973#ifdef CONFIG_SMP
2974
2975static inline unsigned int task_cpu(const struct task_struct *p)
2976{
2977        return task_thread_info(p)->cpu;
2978}
2979
2980static inline int task_node(const struct task_struct *p)
2981{
2982        return cpu_to_node(task_cpu(p));
2983}
2984
2985extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
2986
2987#else
2988
2989static inline unsigned int task_cpu(const struct task_struct *p)
2990{
2991        return 0;
2992}
2993
2994static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
2995{
2996}
2997
2998#endif /* CONFIG_SMP */
2999
3000extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
3001extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
3002
3003#ifdef CONFIG_CGROUP_SCHED
3004extern struct task_group root_task_group;
3005#endif /* CONFIG_CGROUP_SCHED */
3006
3007extern int task_can_switch_user(struct user_struct *up,
3008                                        struct task_struct *tsk);
3009
3010#ifdef CONFIG_TASK_XACCT
3011static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
3012{
3013        tsk->ioac.rchar += amt;
3014}
3015
3016static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
3017{
3018        tsk->ioac.wchar += amt;
3019}
3020
3021static inline void inc_syscr(struct task_struct *tsk)
3022{
3023        tsk->ioac.syscr++;
3024}
3025
3026static inline void inc_syscw(struct task_struct *tsk)
3027{
3028        tsk->ioac.syscw++;
3029}
3030#else
3031static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
3032{
3033}
3034
3035static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
3036{
3037}
3038
3039static inline void inc_syscr(struct task_struct *tsk)
3040{
3041}
3042
3043static inline void inc_syscw(struct task_struct *tsk)
3044{
3045}
3046#endif
3047
3048#ifndef TASK_SIZE_OF
3049#define TASK_SIZE_OF(tsk)       TASK_SIZE
3050#endif
3051
3052#ifdef CONFIG_MEMCG
3053extern void mm_update_next_owner(struct mm_struct *mm);
3054#else
3055static inline void mm_update_next_owner(struct mm_struct *mm)
3056{
3057}
3058#endif /* CONFIG_MEMCG */
3059
3060static inline unsigned long task_rlimit(const struct task_struct *tsk,
3061                unsigned int limit)
3062{
3063        return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
3064}
3065
3066static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
3067                unsigned int limit)
3068{
3069        return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
3070}
3071
3072static inline unsigned long rlimit(unsigned int limit)
3073{
3074        return task_rlimit(current, limit);
3075}
3076
3077static inline unsigned long rlimit_max(unsigned int limit)
3078{
3079        return task_rlimit_max(current, limit);
3080}
3081
3082#endif
3083