linux/include/linux/sched.h
   1/* SPDX-License-Identifier: GPL-2.0 */
   2#ifndef _LINUX_SCHED_H
   3#define _LINUX_SCHED_H
   4
   5/*
   6 * Define 'struct task_struct' and provide the main scheduler
   7 * APIs (schedule(), wakeup variants, etc.)
   8 */
   9
  10#include <uapi/linux/sched.h>
  11
  12#include <asm/current.h>
  13
  14#include <linux/pid.h>
  15#include <linux/sem.h>
  16#include <linux/shm.h>
  17#include <linux/kcov.h>
  18#include <linux/mutex.h>
  19#include <linux/plist.h>
  20#include <linux/hrtimer.h>
  21#include <linux/seccomp.h>
  22#include <linux/nodemask.h>
  23#include <linux/rcupdate.h>
  24#include <linux/resource.h>
  25#include <linux/latencytop.h>
  26#include <linux/sched/prio.h>
  27#include <linux/signal_types.h>
  28#include <linux/mm_types_task.h>
  29#include <linux/task_io_accounting.h>
  30
  31/* task_struct member predeclarations (sorted alphabetically): */
  32struct audit_context;
  33struct backing_dev_info;
  34struct bio_list;
  35struct blk_plug;
  36struct cfs_rq;
  37struct fs_struct;
  38struct futex_pi_state;
  39struct io_context;
  40struct mempolicy;
  41struct nameidata;
  42struct nsproxy;
  43struct perf_event_context;
  44struct pid_namespace;
  45struct pipe_inode_info;
  46struct rcu_node;
  47struct reclaim_state;
  48struct robust_list_head;
  49struct sched_attr;
  50struct sched_param;
  51struct seq_file;
  52struct sighand_struct;
  53struct signal_struct;
  54struct task_delay_info;
  55struct task_group;
  56
  57/*
  58 * Task state bitmask. NOTE! These bits are also
  59 * encoded in fs/proc/array.c: get_task_state().
  60 *
  61 * We have two separate sets of flags: task->state
   62 * is about runnability, while task->exit_state is
   63 * about the task exiting. Confusing, but this way
  64 * modifying one set can't modify the other one by
  65 * mistake.
  66 */
  67
  68/* Used in tsk->state: */
  69#define TASK_RUNNING                    0x0000
  70#define TASK_INTERRUPTIBLE              0x0001
  71#define TASK_UNINTERRUPTIBLE            0x0002
  72#define __TASK_STOPPED                  0x0004
  73#define __TASK_TRACED                   0x0008
  74/* Used in tsk->exit_state: */
  75#define EXIT_DEAD                       0x0010
  76#define EXIT_ZOMBIE                     0x0020
  77#define EXIT_TRACE                      (EXIT_ZOMBIE | EXIT_DEAD)
  78/* Used in tsk->state again: */
  79#define TASK_PARKED                     0x0040
  80#define TASK_DEAD                       0x0080
  81#define TASK_WAKEKILL                   0x0100
  82#define TASK_WAKING                     0x0200
  83#define TASK_NOLOAD                     0x0400
  84#define TASK_NEW                        0x0800
  85#define TASK_STATE_MAX                  0x1000
  86
  87/* Convenience macros for the sake of set_current_state: */
  88#define TASK_KILLABLE                   (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
  89#define TASK_STOPPED                    (TASK_WAKEKILL | __TASK_STOPPED)
  90#define TASK_TRACED                     (TASK_WAKEKILL | __TASK_TRACED)
  91
  92#define TASK_IDLE                       (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
  93
  94/* Convenience macros for the sake of wake_up(): */
  95#define TASK_NORMAL                     (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
  96#define TASK_ALL                        (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
  97
  98/* get_task_state(): */
  99#define TASK_REPORT                     (TASK_RUNNING | TASK_INTERRUPTIBLE | \
 100                                         TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
 101                                         __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
 102                                         TASK_PARKED)
 103
 104#define task_is_traced(task)            ((task->state & __TASK_TRACED) != 0)
 105
 106#define task_is_stopped(task)           ((task->state & __TASK_STOPPED) != 0)
 107
 108#define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
 109
 110#define task_contributes_to_load(task)  ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
 111                                         (task->flags & PF_FROZEN) == 0 && \
 112                                         (task->state & TASK_NOLOAD) == 0)
 113
 114#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 115
 116#define __set_current_state(state_value)                        \
 117        do {                                                    \
 118                current->task_state_change = _THIS_IP_;         \
 119                current->state = (state_value);                 \
 120        } while (0)
 121#define set_current_state(state_value)                          \
 122        do {                                                    \
 123                current->task_state_change = _THIS_IP_;         \
 124                smp_store_mb(current->state, (state_value));    \
 125        } while (0)
 126
 127#else
 128/*
 129 * set_current_state() includes a barrier so that the write of current->state
 130 * is correctly serialised wrt the caller's subsequent test of whether to
 131 * actually sleep:
 132 *
 133 *   for (;;) {
 134 *      set_current_state(TASK_UNINTERRUPTIBLE);
 135 *      if (!need_sleep)
 136 *              break;
 137 *
 138 *      schedule();
 139 *   }
 140 *   __set_current_state(TASK_RUNNING);
 141 *
 142 * If the caller does not need such serialisation (because, for instance, the
 143 * condition test and condition change and wakeup are under the same lock) then
 144 * use __set_current_state().
 145 *
 146 * The above is typically ordered against the wakeup, which does:
 147 *
 148 *      need_sleep = false;
 149 *      wake_up_state(p, TASK_UNINTERRUPTIBLE);
 150 *
 151 * Where wake_up_state() (and all other wakeup primitives) imply enough
 152 * barriers to order the store of the variable against wakeup.
 153 *
 154 * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
 155 * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
 156 * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
 157 *
 158 * This is obviously fine, since they both store the exact same value.
 159 *
 160 * Also see the comments of try_to_wake_up().
 161 */
 162#define __set_current_state(state_value) do { current->state = (state_value); } while (0)
 163#define set_current_state(state_value)   smp_store_mb(current->state, (state_value))
 164#endif
 165
 166/* Task command name length: */
 167#define TASK_COMM_LEN                   16
 168
 169extern cpumask_var_t                    cpu_isolated_map;
 170
 171extern void scheduler_tick(void);
 172
 173#define MAX_SCHEDULE_TIMEOUT            LONG_MAX
 174
 175extern long schedule_timeout(long timeout);
 176extern long schedule_timeout_interruptible(long timeout);
 177extern long schedule_timeout_killable(long timeout);
 178extern long schedule_timeout_uninterruptible(long timeout);
 179extern long schedule_timeout_idle(long timeout);
 180asmlinkage void schedule(void);
 181extern void schedule_preempt_disabled(void);
 182
 183extern int __must_check io_schedule_prepare(void);
 184extern void io_schedule_finish(int token);
 185extern long io_schedule_timeout(long timeout);
 186extern void io_schedule(void);
 187
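/*
 * Illustrative usage (editor's sketch, not part of the original header):
 * sleeping for roughly one second while remaining killable can be done
 * with one of the wrappers declared above:
 *
 *      long remaining = schedule_timeout_killable(HZ);
 *
 * A non-zero return means the task was woken (or received a fatal
 * signal) before the timeout expired, with 'remaining' jiffies left.
 * Plain schedule_timeout() requires the caller to set the task state
 * first, e.g. set_current_state(TASK_UNINTERRUPTIBLE); the
 * *_interruptible / *_killable / *_uninterruptible / *_idle wrappers
 * set it internally.
 */
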
 188/**
 189 * struct prev_cputime - snapshot of system and user cputime
 190 * @utime: time spent in user mode
 191 * @stime: time spent in system mode
 192 * @lock: protects the above two fields
 193 *
 194 * Stores previous user/system time values such that we can guarantee
 195 * monotonicity.
 196 */
 197struct prev_cputime {
 198#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 199        u64                             utime;
 200        u64                             stime;
 201        raw_spinlock_t                  lock;
 202#endif
 203};
 204
 205/**
 206 * struct task_cputime - collected CPU time counts
 207 * @utime:              time spent in user mode, in nanoseconds
 208 * @stime:              time spent in kernel mode, in nanoseconds
 209 * @sum_exec_runtime:   total time spent on the CPU, in nanoseconds
 210 *
 211 * This structure groups together three kinds of CPU time that are tracked for
 212 * threads and thread groups.  Most things considering CPU time want to group
 213 * these counts together and treat all three of them in parallel.
 214 */
 215struct task_cputime {
 216        u64                             utime;
 217        u64                             stime;
 218        unsigned long long              sum_exec_runtime;
 219};
 220
 221/* Alternate field names when used on cache expirations: */
 222#define virt_exp                        utime
 223#define prof_exp                        stime
 224#define sched_exp                       sum_exec_runtime
 225
 226enum vtime_state {
 227        /* Task is sleeping or running in a CPU with VTIME inactive: */
 228        VTIME_INACTIVE = 0,
 229        /* Task runs in userspace in a CPU with VTIME active: */
 230        VTIME_USER,
 231        /* Task runs in kernelspace in a CPU with VTIME active: */
 232        VTIME_SYS,
 233};
 234
 235struct vtime {
 236        seqcount_t              seqcount;
 237        unsigned long long      starttime;
 238        enum vtime_state        state;
 239        u64                     utime;
 240        u64                     stime;
 241        u64                     gtime;
 242};
 243
 244struct sched_info {
 245#ifdef CONFIG_SCHED_INFO
 246        /* Cumulative counters: */
 247
 248        /* # of times we have run on this CPU: */
 249        unsigned long                   pcount;
 250
 251        /* Time spent waiting on a runqueue: */
 252        unsigned long long              run_delay;
 253
 254        /* Timestamps: */
 255
 256        /* When did we last run on a CPU? */
 257        unsigned long long              last_arrival;
 258
 259        /* When were we last queued to run? */
 260        unsigned long long              last_queued;
 261
 262#endif /* CONFIG_SCHED_INFO */
 263};
 264
 265/*
 266 * Integer metrics need fixed point arithmetic, e.g., sched/fair
 267 * has a few: load, load_avg, util_avg, freq, and capacity.
 268 *
 269 * We define a basic fixed point arithmetic range, and then formalize
 270 * all these metrics based on that basic range.
 271 */
 272# define SCHED_FIXEDPOINT_SHIFT         10
 273# define SCHED_FIXEDPOINT_SCALE         (1L << SCHED_FIXEDPOINT_SHIFT)
 274
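/*
 * Worked example (editor's note, not from the original header): with
 * SCHED_FIXEDPOINT_SHIFT == 10, the value 1.0 is represented as
 * SCHED_FIXEDPOINT_SCALE == 1024 and 0.5 as 512. Multiplying two such
 * fixed-point values needs one renormalizing shift:
 *
 *      product = (a * b) >> SCHED_FIXEDPOINT_SHIFT;
 *
 * e.g. (512 * 512) >> 10 == 256, i.e. 0.5 * 0.5 == 0.25.
 */
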
 275struct load_weight {
 276        unsigned long                   weight;
 277        u32                             inv_weight;
 278};
 279
 280/*
 281 * The load_avg/util_avg accumulates an infinite geometric series
 282 * (see __update_load_avg() in kernel/sched/fair.c).
 283 *
 284 * [load_avg definition]
 285 *
 286 *   load_avg = runnable% * scale_load_down(load)
 287 *
 288 * where runnable% is the time ratio that a sched_entity is runnable.
 289 * For cfs_rq, it is the aggregated load_avg of all runnable and
 290 * blocked sched_entities.
 291 *
 292 * load_avg may also take frequency scaling into account:
 293 *
 294 *   load_avg = runnable% * scale_load_down(load) * freq%
 295 *
 296 * where freq% is the CPU frequency normalized to the highest frequency.
 297 *
 298 * [util_avg definition]
 299 *
 300 *   util_avg = running% * SCHED_CAPACITY_SCALE
 301 *
 302 * where running% is the time ratio that a sched_entity is running on
 303 * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
 304 * and blocked sched_entities.
 305 *
 306 * util_avg may also factor frequency scaling and CPU capacity scaling:
 307 *
 308 *   util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity%
 309 *
 310 * where freq% is the same as above, and capacity% is the CPU capacity
 311 * normalized to the greatest capacity (due to uarch differences, etc).
 312 *
 313 * N.B., the above ratios (runnable%, running%, freq%, and capacity%)
  314 * themselves are in the range of [0, 1]. To do fixed point arithmetic,
 315 * we therefore scale them to as large a range as necessary. This is for
 316 * example reflected by util_avg's SCHED_CAPACITY_SCALE.
 317 *
 318 * [Overflow issue]
 319 *
 320 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
 321 * with the highest load (=88761), always runnable on a single cfs_rq,
 322 * and should not overflow as the number already hits PID_MAX_LIMIT.
 323 *
 324 * For all other cases (including 32-bit kernels), struct load_weight's
 325 * weight will overflow first before we do, because:
 326 *
 327 *    Max(load_avg) <= Max(load.weight)
 328 *
 329 * Then it is the load_weight's responsibility to consider overflow
 330 * issues.
 331 */
 332struct sched_avg {
 333        u64                             last_update_time;
 334        u64                             load_sum;
 335        u32                             util_sum;
 336        u32                             period_contrib;
 337        unsigned long                   load_avg;
 338        unsigned long                   util_avg;
 339};
 340
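/*
 * Worked example (editor's note, not from the original header): per the
 * definitions above, a nice-0 task (scale_load_down(load) == 1024) that
 * is runnable half the time has load_avg ~= 0.5 * 1024 == 512, and a
 * task that actually runs 25% of the time on a full-capacity CPU at its
 * highest frequency has util_avg ~= 0.25 * SCHED_CAPACITY_SCALE == 256
 * (SCHED_CAPACITY_SCALE being 1024).
 */
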
 341struct sched_statistics {
 342#ifdef CONFIG_SCHEDSTATS
 343        u64                             wait_start;
 344        u64                             wait_max;
 345        u64                             wait_count;
 346        u64                             wait_sum;
 347        u64                             iowait_count;
 348        u64                             iowait_sum;
 349
 350        u64                             sleep_start;
 351        u64                             sleep_max;
 352        s64                             sum_sleep_runtime;
 353
 354        u64                             block_start;
 355        u64                             block_max;
 356        u64                             exec_max;
 357        u64                             slice_max;
 358
 359        u64                             nr_migrations_cold;
 360        u64                             nr_failed_migrations_affine;
 361        u64                             nr_failed_migrations_running;
 362        u64                             nr_failed_migrations_hot;
 363        u64                             nr_forced_migrations;
 364
 365        u64                             nr_wakeups;
 366        u64                             nr_wakeups_sync;
 367        u64                             nr_wakeups_migrate;
 368        u64                             nr_wakeups_local;
 369        u64                             nr_wakeups_remote;
 370        u64                             nr_wakeups_affine;
 371        u64                             nr_wakeups_affine_attempts;
 372        u64                             nr_wakeups_passive;
 373        u64                             nr_wakeups_idle;
 374#endif
 375};
 376
 377struct sched_entity {
 378        /* For load-balancing: */
 379        struct load_weight              load;
 380        struct rb_node                  run_node;
 381        struct list_head                group_node;
 382        unsigned int                    on_rq;
 383
 384        u64                             exec_start;
 385        u64                             sum_exec_runtime;
 386        u64                             vruntime;
 387        u64                             prev_sum_exec_runtime;
 388
 389        u64                             nr_migrations;
 390
 391        struct sched_statistics         statistics;
 392
 393#ifdef CONFIG_FAIR_GROUP_SCHED
 394        int                             depth;
 395        struct sched_entity             *parent;
 396        /* rq on which this entity is (to be) queued: */
 397        struct cfs_rq                   *cfs_rq;
 398        /* rq "owned" by this entity/group: */
 399        struct cfs_rq                   *my_q;
 400#endif
 401
 402#ifdef CONFIG_SMP
 403        /*
 404         * Per entity load average tracking.
 405         *
 406         * Put into separate cache line so it does not
 407         * collide with read-mostly values above.
 408         */
 409        struct sched_avg                avg ____cacheline_aligned_in_smp;
 410#endif
 411};
 412
 413struct sched_rt_entity {
 414        struct list_head                run_list;
 415        unsigned long                   timeout;
 416        unsigned long                   watchdog_stamp;
 417        unsigned int                    time_slice;
 418        unsigned short                  on_rq;
 419        unsigned short                  on_list;
 420
 421        struct sched_rt_entity          *back;
 422#ifdef CONFIG_RT_GROUP_SCHED
 423        struct sched_rt_entity          *parent;
 424        /* rq on which this entity is (to be) queued: */
 425        struct rt_rq                    *rt_rq;
 426        /* rq "owned" by this entity/group: */
 427        struct rt_rq                    *my_q;
 428#endif
 429} __randomize_layout;
 430
 431struct sched_dl_entity {
 432        struct rb_node                  rb_node;
 433
 434        /*
 435         * Original scheduling parameters. Copied here from sched_attr
 436         * during sched_setattr(), they will remain the same until
 437         * the next sched_setattr().
 438         */
 439        u64                             dl_runtime;     /* Maximum runtime for each instance    */
 440        u64                             dl_deadline;    /* Relative deadline of each instance   */
 441        u64                             dl_period;      /* Separation of two instances (period) */
 442        u64                             dl_bw;          /* dl_runtime / dl_period               */
 443        u64                             dl_density;     /* dl_runtime / dl_deadline             */
 444
 445        /*
 446         * Actual scheduling parameters. Initialized with the values above,
  447         * they are continuously updated during task execution. Note that
 448         * the remaining runtime could be < 0 in case we are in overrun.
 449         */
 450        s64                             runtime;        /* Remaining runtime for this instance  */
 451        u64                             deadline;       /* Absolute deadline for this instance  */
 452        unsigned int                    flags;          /* Specifying the scheduler behaviour   */
 453
 454        /*
 455         * Some bool flags:
 456         *
 457         * @dl_throttled tells if we exhausted the runtime. If so, the
 458         * task has to wait for a replenishment to be performed at the
 459         * next firing of dl_timer.
 460         *
  461         * @dl_boosted tells if we are boosted due to DI (deadline
  462         * inheritance). If so we are outside the bandwidth enforcement
  463         * mechanism (but only until we exit the critical section);
 464         *
 465         * @dl_yielded tells if task gave up the CPU before consuming
 466         * all its available runtime during the last job.
 467         *
 468         * @dl_non_contending tells if the task is inactive while still
 469         * contributing to the active utilization. In other words, it
 470         * indicates if the inactive timer has been armed and its handler
 471         * has not been executed yet. This flag is useful to avoid race
 472         * conditions between the inactive timer handler and the wakeup
 473         * code.
 474         */
 475        int                             dl_throttled;
 476        int                             dl_boosted;
 477        int                             dl_yielded;
 478        int                             dl_non_contending;
 479
 480        /*
 481         * Bandwidth enforcement timer. Each -deadline task has its
 482         * own bandwidth to be enforced, thus we need one timer per task.
 483         */
 484        struct hrtimer                  dl_timer;
 485
 486        /*
 487         * Inactive timer, responsible for decreasing the active utilization
 488         * at the "0-lag time". When a -deadline task blocks, it contributes
 489         * to GRUB's active utilization until the "0-lag time", hence a
 490         * timer is needed to decrease the active utilization at the correct
 491         * time.
 492         */
 493        struct hrtimer inactive_timer;
 494};
 495
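/*
 * Illustrative example (editor's sketch, not from the original header):
 * a task asking for 10ms of runtime every 100ms period, with a 50ms
 * relative deadline, would pass roughly the following attributes (all in
 * nanoseconds) through sched_setattr():
 *
 *      struct sched_attr attr = {
 *              .size           = sizeof(attr),
 *              .sched_policy   = SCHED_DEADLINE,
 *              .sched_runtime  =  10 * NSEC_PER_MSEC,
 *              .sched_deadline =  50 * NSEC_PER_MSEC,
 *              .sched_period   = 100 * NSEC_PER_MSEC,
 *      };
 *
 * dl_runtime, dl_deadline and dl_period above then hold these values,
 * and dl_bw/dl_density are derived from them as documented.
 */
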
 496union rcu_special {
 497        struct {
 498                u8                      blocked;
 499                u8                      need_qs;
 500                u8                      exp_need_qs;
 501
 502                /* Otherwise the compiler can store garbage here: */
 503                u8                      pad;
 504        } b; /* Bits. */
 505        u32 s; /* Set of bits. */
 506};
 507
 508enum perf_event_task_context {
 509        perf_invalid_context = -1,
 510        perf_hw_context = 0,
 511        perf_sw_context,
 512        perf_nr_task_contexts,
 513};
 514
 515struct wake_q_node {
 516        struct wake_q_node *next;
 517};
 518
 519struct task_struct {
 520#ifdef CONFIG_THREAD_INFO_IN_TASK
 521        /*
 522         * For reasons of header soup (see current_thread_info()), this
 523         * must be the first element of task_struct.
 524         */
 525        struct thread_info              thread_info;
 526#endif
 527        /* -1 unrunnable, 0 runnable, >0 stopped: */
 528        volatile long                   state;
 529
 530        /*
 531         * This begins the randomizable portion of task_struct. Only
 532         * scheduling-critical items should be added above here.
 533         */
 534        randomized_struct_fields_start
 535
 536        void                            *stack;
 537        atomic_t                        usage;
 538        /* Per task flags (PF_*), defined further below: */
 539        unsigned int                    flags;
 540        unsigned int                    ptrace;
 541
 542#ifdef CONFIG_SMP
 543        struct llist_node               wake_entry;
 544        int                             on_cpu;
 545#ifdef CONFIG_THREAD_INFO_IN_TASK
 546        /* Current CPU: */
 547        unsigned int                    cpu;
 548#endif
 549        unsigned int                    wakee_flips;
 550        unsigned long                   wakee_flip_decay_ts;
 551        struct task_struct              *last_wakee;
 552
 553        int                             wake_cpu;
 554#endif
 555        int                             on_rq;
 556
 557        int                             prio;
 558        int                             static_prio;
 559        int                             normal_prio;
 560        unsigned int                    rt_priority;
 561
 562        const struct sched_class        *sched_class;
 563        struct sched_entity             se;
 564        struct sched_rt_entity          rt;
 565#ifdef CONFIG_CGROUP_SCHED
 566        struct task_group               *sched_task_group;
 567#endif
 568        struct sched_dl_entity          dl;
 569
 570#ifdef CONFIG_PREEMPT_NOTIFIERS
 571        /* List of struct preempt_notifier: */
 572        struct hlist_head               preempt_notifiers;
 573#endif
 574
 575#ifdef CONFIG_BLK_DEV_IO_TRACE
 576        unsigned int                    btrace_seq;
 577#endif
 578
 579        unsigned int                    policy;
 580        int                             nr_cpus_allowed;
 581        cpumask_t                       cpus_allowed;
 582
 583#ifdef CONFIG_PREEMPT_RCU
 584        int                             rcu_read_lock_nesting;
 585        union rcu_special               rcu_read_unlock_special;
 586        struct list_head                rcu_node_entry;
 587        struct rcu_node                 *rcu_blocked_node;
 588#endif /* #ifdef CONFIG_PREEMPT_RCU */
 589
 590#ifdef CONFIG_TASKS_RCU
 591        unsigned long                   rcu_tasks_nvcsw;
 592        u8                              rcu_tasks_holdout;
 593        u8                              rcu_tasks_idx;
 594        int                             rcu_tasks_idle_cpu;
 595        struct list_head                rcu_tasks_holdout_list;
 596#endif /* #ifdef CONFIG_TASKS_RCU */
 597
 598        struct sched_info               sched_info;
 599
 600        struct list_head                tasks;
 601#ifdef CONFIG_SMP
 602        struct plist_node               pushable_tasks;
 603        struct rb_node                  pushable_dl_tasks;
 604#endif
 605
 606        struct mm_struct                *mm;
 607        struct mm_struct                *active_mm;
 608
 609        /* Per-thread vma caching: */
 610        struct vmacache                 vmacache;
 611
 612#ifdef SPLIT_RSS_COUNTING
 613        struct task_rss_stat            rss_stat;
 614#endif
 615        int                             exit_state;
 616        int                             exit_code;
 617        int                             exit_signal;
 618        /* The signal sent when the parent dies: */
 619        int                             pdeath_signal;
 620        /* JOBCTL_*, siglock protected: */
 621        unsigned long                   jobctl;
 622
 623        /* Used for emulating ABI behavior of previous Linux versions: */
 624        unsigned int                    personality;
 625
 626        /* Scheduler bits, serialized by scheduler locks: */
 627        unsigned                        sched_reset_on_fork:1;
 628        unsigned                        sched_contributes_to_load:1;
 629        unsigned                        sched_migrated:1;
 630        unsigned                        sched_remote_wakeup:1;
 631        /* Force alignment to the next boundary: */
 632        unsigned                        :0;
 633
 634        /* Unserialized, strictly 'current' */
 635
 636        /* Bit to tell LSMs we're in execve(): */
 637        unsigned                        in_execve:1;
 638        unsigned                        in_iowait:1;
 639#ifndef TIF_RESTORE_SIGMASK
 640        unsigned                        restore_sigmask:1;
 641#endif
 642#ifdef CONFIG_MEMCG
 643        unsigned                        memcg_may_oom:1;
 644#ifndef CONFIG_SLOB
 645        unsigned                        memcg_kmem_skip_account:1;
 646#endif
 647#endif
 648#ifdef CONFIG_COMPAT_BRK
 649        unsigned                        brk_randomized:1;
 650#endif
 651#ifdef CONFIG_CGROUPS
 652        /* disallow userland-initiated cgroup migration */
 653        unsigned                        no_cgroup_migration:1;
 654#endif
 655
 656        unsigned long                   atomic_flags; /* Flags requiring atomic access. */
 657
 658        struct restart_block            restart_block;
 659
 660        pid_t                           pid;
 661        pid_t                           tgid;
 662
 663#ifdef CONFIG_CC_STACKPROTECTOR
 664        /* Canary value for the -fstack-protector GCC feature: */
 665        unsigned long                   stack_canary;
 666#endif
 667        /*
 668         * Pointers to the (original) parent process, youngest child, younger sibling,
 669         * older sibling, respectively.  (p->father can be replaced with
 670         * p->real_parent->pid)
 671         */
 672
 673        /* Real parent process: */
 674        struct task_struct __rcu        *real_parent;
 675
 676        /* Recipient of SIGCHLD, wait4() reports: */
 677        struct task_struct __rcu        *parent;
 678
 679        /*
 680         * Children/sibling form the list of natural children:
 681         */
 682        struct list_head                children;
 683        struct list_head                sibling;
 684        struct task_struct              *group_leader;
 685
 686        /*
 687         * 'ptraced' is the list of tasks this task is using ptrace() on.
 688         *
 689         * This includes both natural children and PTRACE_ATTACH targets.
 690         * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
 691         */
 692        struct list_head                ptraced;
 693        struct list_head                ptrace_entry;
 694
 695        /* PID/PID hash table linkage. */
 696        struct pid_link                 pids[PIDTYPE_MAX];
 697        struct list_head                thread_group;
 698        struct list_head                thread_node;
 699
 700        struct completion               *vfork_done;
 701
 702        /* CLONE_CHILD_SETTID: */
 703        int __user                      *set_child_tid;
 704
 705        /* CLONE_CHILD_CLEARTID: */
 706        int __user                      *clear_child_tid;
 707
 708        u64                             utime;
 709        u64                             stime;
 710#ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
 711        u64                             utimescaled;
 712        u64                             stimescaled;
 713#endif
 714        u64                             gtime;
 715        struct prev_cputime             prev_cputime;
 716#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 717        struct vtime                    vtime;
 718#endif
 719
 720#ifdef CONFIG_NO_HZ_FULL
 721        atomic_t                        tick_dep_mask;
 722#endif
 723        /* Context switch counts: */
 724        unsigned long                   nvcsw;
 725        unsigned long                   nivcsw;
 726
 727        /* Monotonic time in nsecs: */
 728        u64                             start_time;
 729
 730        /* Boot based time in nsecs: */
 731        u64                             real_start_time;
 732
 733        /* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
 734        unsigned long                   min_flt;
 735        unsigned long                   maj_flt;
 736
 737#ifdef CONFIG_POSIX_TIMERS
 738        struct task_cputime             cputime_expires;
 739        struct list_head                cpu_timers[3];
 740#endif
 741
 742        /* Process credentials: */
 743
 744        /* Tracer's credentials at attach: */
 745        const struct cred __rcu         *ptracer_cred;
 746
 747        /* Objective and real subjective task credentials (COW): */
 748        const struct cred __rcu         *real_cred;
 749
 750        /* Effective (overridable) subjective task credentials (COW): */
 751        const struct cred __rcu         *cred;
 752
 753        /*
 754         * executable name, excluding path.
 755         *
  756         * - normally initialized by setup_new_exec()
 757         * - access it with [gs]et_task_comm()
 758         * - lock it with task_lock()
 759         */
 760        char                            comm[TASK_COMM_LEN];
 761
 762        struct nameidata                *nameidata;
 763
 764#ifdef CONFIG_SYSVIPC
 765        struct sysv_sem                 sysvsem;
 766        struct sysv_shm                 sysvshm;
 767#endif
 768#ifdef CONFIG_DETECT_HUNG_TASK
 769        unsigned long                   last_switch_count;
 770#endif
 771        /* Filesystem information: */
 772        struct fs_struct                *fs;
 773
 774        /* Open file information: */
 775        struct files_struct             *files;
 776
 777        /* Namespaces: */
 778        struct nsproxy                  *nsproxy;
 779
 780        /* Signal handlers: */
 781        struct signal_struct            *signal;
 782        struct sighand_struct           *sighand;
 783        sigset_t                        blocked;
 784        sigset_t                        real_blocked;
 785        /* Restored if set_restore_sigmask() was used: */
 786        sigset_t                        saved_sigmask;
 787        struct sigpending               pending;
 788        unsigned long                   sas_ss_sp;
 789        size_t                          sas_ss_size;
 790        unsigned int                    sas_ss_flags;
 791
 792        struct callback_head            *task_works;
 793
 794        struct audit_context            *audit_context;
 795#ifdef CONFIG_AUDITSYSCALL
 796        kuid_t                          loginuid;
 797        unsigned int                    sessionid;
 798#endif
 799        struct seccomp                  seccomp;
 800
 801        /* Thread group tracking: */
 802        u32                             parent_exec_id;
 803        u32                             self_exec_id;
 804
 805        /* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
 806        spinlock_t                      alloc_lock;
 807
 808        /* Protection of the PI data structures: */
 809        raw_spinlock_t                  pi_lock;
 810
 811        struct wake_q_node              wake_q;
 812
 813#ifdef CONFIG_RT_MUTEXES
 814        /* PI waiters blocked on a rt_mutex held by this task: */
 815        struct rb_root_cached           pi_waiters;
 816        /* Updated under owner's pi_lock and rq lock */
 817        struct task_struct              *pi_top_task;
 818        /* Deadlock detection and priority inheritance handling: */
 819        struct rt_mutex_waiter          *pi_blocked_on;
 820#endif
 821
 822#ifdef CONFIG_DEBUG_MUTEXES
 823        /* Mutex deadlock detection: */
 824        struct mutex_waiter             *blocked_on;
 825#endif
 826
 827#ifdef CONFIG_TRACE_IRQFLAGS
 828        unsigned int                    irq_events;
 829        unsigned long                   hardirq_enable_ip;
 830        unsigned long                   hardirq_disable_ip;
 831        unsigned int                    hardirq_enable_event;
 832        unsigned int                    hardirq_disable_event;
 833        int                             hardirqs_enabled;
 834        int                             hardirq_context;
 835        unsigned long                   softirq_disable_ip;
 836        unsigned long                   softirq_enable_ip;
 837        unsigned int                    softirq_disable_event;
 838        unsigned int                    softirq_enable_event;
 839        int                             softirqs_enabled;
 840        int                             softirq_context;
 841#endif
 842
 843#ifdef CONFIG_LOCKDEP
 844# define MAX_LOCK_DEPTH                 48UL
 845        u64                             curr_chain_key;
 846        int                             lockdep_depth;
 847        unsigned int                    lockdep_recursion;
 848        struct held_lock                held_locks[MAX_LOCK_DEPTH];
 849#endif
 850
 851#ifdef CONFIG_LOCKDEP_CROSSRELEASE
 852#define MAX_XHLOCKS_NR 64UL
 853        struct hist_lock *xhlocks; /* Crossrelease history locks */
 854        unsigned int xhlock_idx;
 855        /* For restoring at history boundaries */
 856        unsigned int xhlock_idx_hist[XHLOCK_CTX_NR];
 857        unsigned int hist_id;
 858        /* For overwrite check at each context exit */
 859        unsigned int hist_id_save[XHLOCK_CTX_NR];
 860#endif
 861
 862#ifdef CONFIG_UBSAN
 863        unsigned int                    in_ubsan;
 864#endif
 865
 866        /* Journalling filesystem info: */
 867        void                            *journal_info;
 868
 869        /* Stacked block device info: */
 870        struct bio_list                 *bio_list;
 871
 872#ifdef CONFIG_BLOCK
 873        /* Stack plugging: */
 874        struct blk_plug                 *plug;
 875#endif
 876
 877        /* VM state: */
 878        struct reclaim_state            *reclaim_state;
 879
 880        struct backing_dev_info         *backing_dev_info;
 881
 882        struct io_context               *io_context;
 883
 884        /* Ptrace state: */
 885        unsigned long                   ptrace_message;
 886        siginfo_t                       *last_siginfo;
 887
 888        struct task_io_accounting       ioac;
 889#ifdef CONFIG_TASK_XACCT
 890        /* Accumulated RSS usage: */
 891        u64                             acct_rss_mem1;
 892        /* Accumulated virtual memory usage: */
 893        u64                             acct_vm_mem1;
 894        /* stime + utime since last update: */
 895        u64                             acct_timexpd;
 896#endif
 897#ifdef CONFIG_CPUSETS
 898        /* Protected by ->alloc_lock: */
 899        nodemask_t                      mems_allowed;
  900        /* Sequence number to catch updates: */
 901        seqcount_t                      mems_allowed_seq;
 902        int                             cpuset_mem_spread_rotor;
 903        int                             cpuset_slab_spread_rotor;
 904#endif
 905#ifdef CONFIG_CGROUPS
 906        /* Control Group info protected by css_set_lock: */
 907        struct css_set __rcu            *cgroups;
 908        /* cg_list protected by css_set_lock and tsk->alloc_lock: */
 909        struct list_head                cg_list;
 910#endif
 911#ifdef CONFIG_INTEL_RDT
 912        u32                             closid;
 913        u32                             rmid;
 914#endif
 915#ifdef CONFIG_FUTEX
 916        struct robust_list_head __user  *robust_list;
 917#ifdef CONFIG_COMPAT
 918        struct compat_robust_list_head __user *compat_robust_list;
 919#endif
 920        struct list_head                pi_state_list;
 921        struct futex_pi_state           *pi_state_cache;
 922#endif
 923#ifdef CONFIG_PERF_EVENTS
 924        struct perf_event_context       *perf_event_ctxp[perf_nr_task_contexts];
 925        struct mutex                    perf_event_mutex;
 926        struct list_head                perf_event_list;
 927#endif
 928#ifdef CONFIG_DEBUG_PREEMPT
 929        unsigned long                   preempt_disable_ip;
 930#endif
 931#ifdef CONFIG_NUMA
 932        /* Protected by alloc_lock: */
 933        struct mempolicy                *mempolicy;
 934        short                           il_prev;
 935        short                           pref_node_fork;
 936#endif
 937#ifdef CONFIG_NUMA_BALANCING
 938        int                             numa_scan_seq;
 939        unsigned int                    numa_scan_period;
 940        unsigned int                    numa_scan_period_max;
 941        int                             numa_preferred_nid;
 942        unsigned long                   numa_migrate_retry;
 943        /* Migration stamp: */
 944        u64                             node_stamp;
 945        u64                             last_task_numa_placement;
 946        u64                             last_sum_exec_runtime;
 947        struct callback_head            numa_work;
 948
 949        struct list_head                numa_entry;
 950        struct numa_group               *numa_group;
 951
 952        /*
 953         * numa_faults is an array split into four regions:
 954         * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
 955         * in this precise order.
 956         *
 957         * faults_memory: Exponential decaying average of faults on a per-node
 958         * basis. Scheduling placement decisions are made based on these
 959         * counts. The values remain static for the duration of a PTE scan.
 960         * faults_cpu: Track the nodes the process was running on when a NUMA
 961         * hinting fault was incurred.
 962         * faults_memory_buffer and faults_cpu_buffer: Record faults per node
 963         * during the current scan window. When the scan completes, the counts
 964         * in faults_memory and faults_cpu decay and these values are copied.
 965         */
 966        unsigned long                   *numa_faults;
 967        unsigned long                   total_numa_faults;
 968
 969        /*
 970         * numa_faults_locality tracks if faults recorded during the last
 971         * scan window were remote/local or failed to migrate. The task scan
 972         * period is adapted based on the locality of the faults with different
 973         * weights depending on whether they were shared or private faults
 974         */
 975        unsigned long                   numa_faults_locality[3];
 976
 977        unsigned long                   numa_pages_migrated;
 978#endif /* CONFIG_NUMA_BALANCING */
 979
 980        struct tlbflush_unmap_batch     tlb_ubc;
 981
 982        struct rcu_head                 rcu;
 983
 984        /* Cache last used pipe for splice(): */
 985        struct pipe_inode_info          *splice_pipe;
 986
 987        struct page_frag                task_frag;
 988
 989#ifdef CONFIG_TASK_DELAY_ACCT
 990        struct task_delay_info          *delays;
 991#endif
 992
 993#ifdef CONFIG_FAULT_INJECTION
 994        int                             make_it_fail;
 995        unsigned int                    fail_nth;
 996#endif
 997        /*
 998         * When (nr_dirtied >= nr_dirtied_pause), it's time to call
 999         * balance_dirty_pages() for a dirty throttling pause:
1000         */
1001        int                             nr_dirtied;
1002        int                             nr_dirtied_pause;
1003        /* Start of a write-and-pause period: */
1004        unsigned long                   dirty_paused_when;
1005
1006#ifdef CONFIG_LATENCYTOP
1007        int                             latency_record_count;
1008        struct latency_record           latency_record[LT_SAVECOUNT];
1009#endif
1010        /*
1011         * Time slack values; these are used to round up poll() and
1012         * select() etc timeout values. These are in nanoseconds.
1013         */
1014        u64                             timer_slack_ns;
1015        u64                             default_timer_slack_ns;
1016
1017#ifdef CONFIG_KASAN
1018        unsigned int                    kasan_depth;
1019#endif
1020
1021#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1022        /* Index of current stored address in ret_stack: */
1023        int                             curr_ret_stack;
1024
1025        /* Stack of return addresses for return function tracing: */
1026        struct ftrace_ret_stack         *ret_stack;
1027
1028        /* Timestamp for last schedule: */
1029        unsigned long long              ftrace_timestamp;
1030
1031        /*
1032         * Number of functions that haven't been traced
1033         * because of depth overrun:
1034         */
1035        atomic_t                        trace_overrun;
1036
1037        /* Pause tracing: */
1038        atomic_t                        tracing_graph_pause;
1039#endif
1040
1041#ifdef CONFIG_TRACING
1042        /* State flags for use by tracers: */
1043        unsigned long                   trace;
1044
1045        /* Bitmask and counter of trace recursion: */
1046        unsigned long                   trace_recursion;
1047#endif /* CONFIG_TRACING */
1048
1049#ifdef CONFIG_KCOV
1050        /* Coverage collection mode enabled for this task (0 if disabled): */
1051        enum kcov_mode                  kcov_mode;
1052
1053        /* Size of the kcov_area: */
1054        unsigned int                    kcov_size;
1055
1056        /* Buffer for coverage collection: */
1057        void                            *kcov_area;
1058
1059        /* KCOV descriptor wired with this task or NULL: */
1060        struct kcov                     *kcov;
1061#endif
1062
1063#ifdef CONFIG_MEMCG
1064        struct mem_cgroup               *memcg_in_oom;
1065        gfp_t                           memcg_oom_gfp_mask;
1066        int                             memcg_oom_order;
1067
1068        /* Number of pages to reclaim on returning to userland: */
1069        unsigned int                    memcg_nr_pages_over_high;
1070#endif
1071
1072#ifdef CONFIG_UPROBES
1073        struct uprobe_task              *utask;
1074#endif
1075#if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1076        unsigned int                    sequential_io;
1077        unsigned int                    sequential_io_avg;
1078#endif
1079#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1080        unsigned long                   task_state_change;
1081#endif
1082        int                             pagefault_disabled;
1083#ifdef CONFIG_MMU
1084        struct task_struct              *oom_reaper_list;
1085#endif
1086#ifdef CONFIG_VMAP_STACK
1087        struct vm_struct                *stack_vm_area;
1088#endif
1089#ifdef CONFIG_THREAD_INFO_IN_TASK
1090        /* A live task holds one reference: */
1091        atomic_t                        stack_refcount;
1092#endif
1093#ifdef CONFIG_LIVEPATCH
1094        int patch_state;
1095#endif
1096#ifdef CONFIG_SECURITY
1097        /* Used by LSM modules for access restriction: */
1098        void                            *security;
1099#endif
1100
1101        /*
1102         * New fields for task_struct should be added above here, so that
1103         * they are included in the randomized portion of task_struct.
1104         */
1105        randomized_struct_fields_end
1106
1107        /* CPU-specific state of this task: */
1108        struct thread_struct            thread;
1109
1110        /*
1111         * WARNING: on x86, 'thread_struct' contains a variable-sized
1112         * structure.  It *MUST* be at the end of 'task_struct'.
1113         *
1114         * Do not put anything below here!
1115         */
1116};
1117
1118static inline struct pid *task_pid(struct task_struct *task)
1119{
1120        return task->pids[PIDTYPE_PID].pid;
1121}
1122
1123static inline struct pid *task_tgid(struct task_struct *task)
1124{
1125        return task->group_leader->pids[PIDTYPE_PID].pid;
1126}
1127
1128/*
1129 * Without tasklist or RCU lock it is not safe to dereference
1130 * the result of task_pgrp/task_session even if task == current,
1131 * we can race with another thread doing sys_setsid/sys_setpgid.
1132 */
1133static inline struct pid *task_pgrp(struct task_struct *task)
1134{
1135        return task->group_leader->pids[PIDTYPE_PGID].pid;
1136}
1137
1138static inline struct pid *task_session(struct task_struct *task)
1139{
1140        return task->group_leader->pids[PIDTYPE_SID].pid;
1141}
1142
1143/*
1144 * the helpers to get the task's different pids as they are seen
1145 * from various namespaces
1146 *
1147 * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
1148 * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
1149 *                     current.
1150 * task_xid_nr_ns()  : id seen from the ns specified;
1151 *
1152 * see also pid_nr() etc in include/linux/pid.h
1153 */
1154pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
1155
1156static inline pid_t task_pid_nr(struct task_struct *tsk)
1157{
1158        return tsk->pid;
1159}
1160
1161static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1162{
1163        return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1164}
1165
1166static inline pid_t task_pid_vnr(struct task_struct *tsk)
1167{
1168        return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1169}
1170
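/*
 * Editor's illustration (not part of the original header): for a task
 * living inside a PID namespace, task_pid_nr(p) returns the id visible
 * from the init namespace (what the host sees in /proc), while
 * task_pid_vnr(p) returns the id as seen from current's namespace,
 * e.g. the value a process in the same container would use to refer
 * to p.
 */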
1171
1172static inline pid_t task_tgid_nr(struct task_struct *tsk)
1173{
1174        return tsk->tgid;
1175}
1176
1177/**
1178 * pid_alive - check that a task structure is not stale
1179 * @p: Task structure to be checked.
1180 *
 1181 * Test if a process is not yet dead (at most zombie state).
1182 * If pid_alive fails, then pointers within the task structure
1183 * can be stale and must not be dereferenced.
1184 *
1185 * Return: 1 if the process is alive. 0 otherwise.
1186 */
1187static inline int pid_alive(const struct task_struct *p)
1188{
1189        return p->pids[PIDTYPE_PID].pid != NULL;
1190}
1191
1192static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1193{
1194        return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1195}
1196
1197static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1198{
1199        return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1200}
1201
1202
1203static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1204{
1205        return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1206}
1207
1208static inline pid_t task_session_vnr(struct task_struct *tsk)
1209{
1210        return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1211}
1212
1213static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1214{
1215        return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, ns);
1216}
1217
1218static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1219{
1220        return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, NULL);
1221}
1222
1223static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1224{
1225        pid_t pid = 0;
1226
1227        rcu_read_lock();
1228        if (pid_alive(tsk))
1229                pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1230        rcu_read_unlock();
1231
1232        return pid;
1233}
1234
1235static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1236{
1237        return task_ppid_nr_ns(tsk, &init_pid_ns);
1238}
1239
1240/* Obsolete, do not use: */
1241static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1242{
1243        return task_pgrp_nr_ns(tsk, &init_pid_ns);
1244}
1245
1246#define TASK_REPORT_IDLE        (TASK_REPORT + 1)
1247#define TASK_REPORT_MAX         (TASK_REPORT_IDLE << 1)
1248
1249static inline unsigned int __get_task_state(struct task_struct *tsk)
1250{
1251        unsigned int tsk_state = READ_ONCE(tsk->state);
1252        unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT;
1253
1254        BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
1255
1256        if (tsk_state == TASK_IDLE)
1257                state = TASK_REPORT_IDLE;
1258
1259        return fls(state);
1260}
1261
1262static inline char __task_state_to_char(unsigned int state)
1263{
1264        static const char state_char[] = "RSDTtXZPI";
1265
1266        BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);
1267
1268        return state_char[state];
1269}
1270
1271static inline char task_state_to_char(struct task_struct *tsk)
1272{
1273        return __task_state_to_char(__get_task_state(tsk));
1274}
1275
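/*
 * Editor's note (derived from the code above, not part of the original
 * header): the resulting characters are the familiar ps(1) /
 * /proc/<pid>/stat state letters: TASK_RUNNING -> 'R',
 * TASK_INTERRUPTIBLE -> 'S', TASK_UNINTERRUPTIBLE -> 'D',
 * __TASK_STOPPED -> 'T', __TASK_TRACED -> 't', EXIT_DEAD -> 'X',
 * EXIT_ZOMBIE -> 'Z', TASK_PARKED -> 'P' and TASK_IDLE -> 'I'.
 */
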
1276/**
1277 * is_global_init - check if a task structure is init. Since init
1278 * is free to have sub-threads we need to check tgid.
1279 * @tsk: Task structure to be checked.
1280 *
1281 * Check if a task structure is the first user space task the kernel created.
1282 *
1283 * Return: 1 if the task structure is init. 0 otherwise.
1284 */
1285static inline int is_global_init(struct task_struct *tsk)
1286{
1287        return task_tgid_nr(tsk) == 1;
1288}
1289
1290extern struct pid *cad_pid;
1291
1292/*
1293 * Per process flags
1294 */
1295#define PF_IDLE                 0x00000002      /* I am an IDLE thread */
1296#define PF_EXITING              0x00000004      /* Getting shut down */
1297#define PF_EXITPIDONE           0x00000008      /* PI exit done on shut down */
1298#define PF_VCPU                 0x00000010      /* I'm a virtual CPU */
1299#define PF_WQ_WORKER            0x00000020      /* I'm a workqueue worker */
1300#define PF_FORKNOEXEC           0x00000040      /* Forked but didn't exec */
1301#define PF_MCE_PROCESS          0x00000080      /* Process policy on mce errors */
1302#define PF_SUPERPRIV            0x00000100      /* Used super-user privileges */
1303#define PF_DUMPCORE             0x00000200      /* Dumped core */
1304#define PF_SIGNALED             0x00000400      /* Killed by a signal */
1305#define PF_MEMALLOC             0x00000800      /* Allocating memory */
1306#define PF_NPROC_EXCEEDED       0x00001000      /* set_user() noticed that RLIMIT_NPROC was exceeded */
1307#define PF_USED_MATH            0x00002000      /* If unset the fpu must be initialized before use */
1308#define PF_USED_ASYNC           0x00004000      /* Used async_schedule*(), used by module init */
1309#define PF_NOFREEZE             0x00008000      /* This thread should not be frozen */
1310#define PF_FROZEN               0x00010000      /* Frozen for system suspend */
1311#define PF_KSWAPD               0x00020000      /* I am kswapd */
1312#define PF_MEMALLOC_NOFS        0x00040000      /* All allocation requests will inherit GFP_NOFS */
1313#define PF_MEMALLOC_NOIO        0x00080000      /* All allocation requests will inherit GFP_NOIO */
1314#define PF_LESS_THROTTLE        0x00100000      /* Throttle me less: I clean memory */
1315#define PF_KTHREAD              0x00200000      /* I am a kernel thread */
1316#define PF_RANDOMIZE            0x00400000      /* Randomize virtual address space */
1317#define PF_SWAPWRITE            0x00800000      /* Allowed to write to swap */
1318#define PF_NO_SETAFFINITY       0x04000000      /* Userland is not allowed to meddle with cpus_allowed */
1319#define PF_MCE_EARLY            0x08000000      /* Early kill for mce process policy */
1320#define PF_MUTEX_TESTER         0x20000000      /* Thread belongs to the rt mutex tester */
1321#define PF_FREEZER_SKIP         0x40000000      /* Freezer should not count it as freezable */
1322#define PF_SUSPEND_TASK         0x80000000      /* This thread called freeze_processes() and should not be frozen */
1323
1324/*
1325 * Only the _current_ task can read/write to tsk->flags, but other
1326 * tasks can access tsk->flags in readonly mode for example
1327 * with tsk_used_math (like during threaded core dumping).
1328 * There is however an exception to this rule during ptrace
1329 * or during fork: the ptracer task is allowed to write to the
1330 * child->flags of its traced child (same goes for fork, the parent
1331 * can write to the child->flags), because we're guaranteed the
1332 * child is not running and in turn not changing child->flags
1333 * at the same time the parent does it.
1334 */
1335#define clear_stopped_child_used_math(child)    do { (child)->flags &= ~PF_USED_MATH; } while (0)
1336#define set_stopped_child_used_math(child)      do { (child)->flags |= PF_USED_MATH; } while (0)
1337#define clear_used_math()                       clear_stopped_child_used_math(current)
1338#define set_used_math()                         set_stopped_child_used_math(current)
1339
1340#define conditional_stopped_child_used_math(condition, child) \
1341        do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1342
1343#define conditional_used_math(condition)        conditional_stopped_child_used_math(condition, current)
1344
1345#define copy_to_stopped_child_used_math(child) \
1346        do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1347
1348/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
1349#define tsk_used_math(p)                        ((p)->flags & PF_USED_MATH)
1350#define used_math()                             tsk_used_math(current)
1351
1352static inline bool is_percpu_thread(void)
1353{
1354#ifdef CONFIG_SMP
1355        return (current->flags & PF_NO_SETAFFINITY) &&
1356                (current->nr_cpus_allowed  == 1);
1357#else
1358        return true;
1359#endif
1360}
1361
1362/* Per-process atomic flags. */
1363#define PFA_NO_NEW_PRIVS                0       /* May not gain new privileges. */
1364#define PFA_SPREAD_PAGE                 1       /* Spread page cache over cpuset */
1365#define PFA_SPREAD_SLAB                 2       /* Spread some slab caches over cpuset */
1366
1367
1368#define TASK_PFA_TEST(name, func)                                       \
1369        static inline bool task_##func(struct task_struct *p)           \
1370        { return test_bit(PFA_##name, &p->atomic_flags); }
1371
1372#define TASK_PFA_SET(name, func)                                        \
1373        static inline void task_set_##func(struct task_struct *p)       \
1374        { set_bit(PFA_##name, &p->atomic_flags); }
1375
1376#define TASK_PFA_CLEAR(name, func)                                      \
1377        static inline void task_clear_##func(struct task_struct *p)     \
1378        { clear_bit(PFA_##name, &p->atomic_flags); }
1379
1380TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
1381TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
1382
1383TASK_PFA_TEST(SPREAD_PAGE, spread_page)
1384TASK_PFA_SET(SPREAD_PAGE, spread_page)
1385TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
1386
1387TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
1388TASK_PFA_SET(SPREAD_SLAB, spread_slab)
1389TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
1390
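/*
 * For reference (editor's note, not from the original header),
 * TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs) above expands to:
 *
 *      static inline bool task_no_new_privs(struct task_struct *p)
 *      { return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags); }
 *
 * Note that NO_NEW_PRIVS deliberately has no TASK_PFA_CLEAR() instance:
 * once set, the flag is never cleared for the lifetime of the task.
 */
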
1391static inline void
1392current_restore_flags(unsigned long orig_flags, unsigned long flags)
1393{
1394        current->flags &= ~flags;
1395        current->flags |= orig_flags & flags;
1396}
1397
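/*
 * Typical usage (editor's sketch, not from the original header): save
 * the bits you are about to set, set them, and restore the saved values
 * on the way out so that nested sections behave:
 *
 *      unsigned int pflags = current->flags & PF_MEMALLOC_NOIO;
 *
 *      current->flags |= PF_MEMALLOC_NOIO;
 *      ...allocations here implicitly get GFP_NOIO...
 *      current_restore_flags(pflags, PF_MEMALLOC_NOIO);
 *
 * This is the pattern behind helpers such as memalloc_noio_save() /
 * memalloc_noio_restore() in <linux/sched/mm.h>.
 */
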
1398extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
1399extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
1400#ifdef CONFIG_SMP
1401extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
1402extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
1403#else
1404static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
1405{
1406}
1407static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
1408{
1409        if (!cpumask_test_cpu(0, new_mask))
1410                return -EINVAL;
1411        return 0;
1412}
1413#endif
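
/*
 * Example (illustrative only): pinning a task to a single CPU via the
 * affinity API above.  kthread users normally go through kthread_bind(),
 * but the underlying call looks like:
 *
 *      int err = set_cpus_allowed_ptr(p, cpumask_of(cpu));
 *      if (err)
 *              pr_warn("could not pin task to CPU %d: %d\n", cpu, err);
 *
 * Note that on !SMP builds the stub above only accepts masks containing CPU 0.
 */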
1414
1415#ifndef cpu_relax_yield
1416#define cpu_relax_yield() cpu_relax()
1417#endif
1418
1419extern int yield_to(struct task_struct *p, bool preempt);
1420extern void set_user_nice(struct task_struct *p, long nice);
1421extern int task_prio(const struct task_struct *p);
1422
1423/**
1424 * task_nice - return the nice value of a given task.
1425 * @p: the task in question.
1426 *
1427 * Return: The nice value [ -20 ... 0 ... 19 ].
1428 */
1429static inline int task_nice(const struct task_struct *p)
1430{
1431        return PRIO_TO_NICE((p)->static_prio);
1432}
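
/*
 * PRIO_TO_NICE() (from linux/sched/prio.h) simply subtracts DEFAULT_PRIO
 * (120) from the static priority, so for example:
 *
 *      p->static_prio == 120  =>  task_nice(p) ==   0
 *      p->static_prio == 100  =>  task_nice(p) == -20
 *      p->static_prio == 139  =>  task_nice(p) ==  19
 */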
1433
1434extern int can_nice(const struct task_struct *p, const int nice);
1435extern int task_curr(const struct task_struct *p);
1436extern int idle_cpu(int cpu);
1437extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
1438extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
1439extern int sched_setattr(struct task_struct *, const struct sched_attr *);
1440extern struct task_struct *idle_task(int cpu);
1441
1442/**
1443 * is_idle_task - is the specified task an idle task?
1444 * @p: the task in question.
1445 *
1446 * Return: true if @p is an idle task, false otherwise.
1447 */
1448static inline bool is_idle_task(const struct task_struct *p)
1449{
1450        return !!(p->flags & PF_IDLE);
1451}
1452
1453extern struct task_struct *curr_task(int cpu);
1454extern void ia64_set_curr_task(int cpu, struct task_struct *p);
1455
1456void yield(void);
1457
1458union thread_union {
1459#ifndef CONFIG_THREAD_INFO_IN_TASK
1460        struct thread_info thread_info;
1461#endif
1462        unsigned long stack[THREAD_SIZE/sizeof(long)];
1463};
1464
1465#ifdef CONFIG_THREAD_INFO_IN_TASK
1466static inline struct thread_info *task_thread_info(struct task_struct *task)
1467{
1468        return &task->thread_info;
1469}
1470#elif !defined(__HAVE_THREAD_FUNCTIONS)
1471# define task_thread_info(task) ((struct thread_info *)(task)->stack)
1472#endif
1473
1474/*
1475 * Find a task by one of its numerical IDs:
1476 *
1477 * find_task_by_pid_ns():
1478 *      finds a task by its PID in the specified namespace
1479 * find_task_by_vpid():
1480 *      finds a task by its virtual PID (relative to the current task's PID namespace)
1481 *
1482 * See also find_vpid() etc. in include/linux/pid.h.
1483 */
1484
1485extern struct task_struct *find_task_by_vpid(pid_t nr);
1486extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);
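
/*
 * Example: both lookups return the task without taking a reference, so
 * callers typically hold the RCU read lock (or tasklist_lock) and pin the
 * result before using it outside the critical section:
 *
 *      struct task_struct *p;
 *
 *      rcu_read_lock();
 *      p = find_task_by_vpid(nr);
 *      if (p)
 *              get_task_struct(p);
 *      rcu_read_unlock();
 *      ...
 *      if (p)
 *              put_task_struct(p);
 */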
1487
1488extern int wake_up_state(struct task_struct *tsk, unsigned int state);
1489extern int wake_up_process(struct task_struct *tsk);
1490extern void wake_up_new_task(struct task_struct *tsk);
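
/*
 * Example: the classic sleep/wake pairing these helpers implement, with
 * 'condition' standing in for whatever the sleeper is waiting on:
 *
 *      // Sleeper side:
 *      for (;;) {
 *              set_current_state(TASK_INTERRUPTIBLE);
 *              if (condition)
 *                      break;
 *              schedule();
 *      }
 *      __set_current_state(TASK_RUNNING);
 *
 *      // Waker side:
 *      condition = true;
 *      wake_up_process(sleeper_task);
 *
 * wake_up_state() only wakes tasks whose ->state matches the given mask;
 * wake_up_new_task() is used once, for a freshly forked task.
 */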
1491
1492#ifdef CONFIG_SMP
1493extern void kick_process(struct task_struct *tsk);
1494#else
1495static inline void kick_process(struct task_struct *tsk) { }
1496#endif
1497
1498extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
1499
1500static inline void set_task_comm(struct task_struct *tsk, const char *from)
1501{
1502        __set_task_comm(tsk, from, false);
1503}
1504
1505extern char *get_task_comm(char *to, struct task_struct *tsk);
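
/*
 * Example (illustrative only; "my-worker" is a made-up name): reading and
 * writing the task name through the helpers above, with TASK_COMM_LEN
 * bounding the buffer:
 *
 *      char comm[TASK_COMM_LEN];
 *
 *      get_task_comm(comm, current);
 *      pr_debug("running in %s\n", comm);
 *
 *      set_task_comm(current, "my-worker");
 */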
1506
1507#ifdef CONFIG_SMP
1508void scheduler_ipi(void);
1509extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
1510#else
1511static inline void scheduler_ipi(void) { }
1512static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1513{
1514        return 1;
1515}
1516#endif
1517
1518/*
1519 * Set, clear and test thread flags in another task's thread_info.
1520 * See asm/thread_info.h for the available TIF_xxxx flags.
1521 */
1522static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
1523{
1524        set_ti_thread_flag(task_thread_info(tsk), flag);
1525}
1526
1527static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
1528{
1529        clear_ti_thread_flag(task_thread_info(tsk), flag);
1530}
1531
1532static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
1533{
1534        return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
1535}
1536
1537static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
1538{
1539        return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
1540}
1541
1542static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
1543{
1544        return test_ti_thread_flag(task_thread_info(tsk), flag);
1545}
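
/*
 * Example: these wrappers are how core code pokes TIF_* bits in another
 * task; signal delivery marks its target in this spirit (see
 * signal_wake_up_state() in linux/sched/signal.h):
 *
 *      set_tsk_thread_flag(t, TIF_SIGPENDING);
 *      if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
 *              kick_process(t);
 */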
1546
1547static inline void set_tsk_need_resched(struct task_struct *tsk)
1548{
1549        set_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
1550}
1551
1552static inline void clear_tsk_need_resched(struct task_struct *tsk)
1553{
1554        clear_tsk_thread_flag(tsk, TIF_NEED_RESCHED);
1555}
1556
1557static inline int test_tsk_need_resched(struct task_struct *tsk)
1558{
1559        return unlikely(test_tsk_thread_flag(tsk, TIF_NEED_RESCHED));
1560}
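
/*
 * Example (a loose sketch only): asking a remote task to reschedule means
 * setting its flag and then nudging the CPU it runs on.  The real
 * resched_curr() in kernel/sched/core.c additionally handles the local-CPU
 * case with set_preempt_need_resched() and IPIs via smp_send_reschedule():
 *
 *      if (!test_tsk_need_resched(p)) {
 *              set_tsk_need_resched(p);
 *              kick_process(p);        // IPI the CPU so it notices the flag
 *      }
 */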
1561
1562/*
1563 * cond_resched() and cond_resched_lock(): latency reduction via
1564 * explicit rescheduling in places where it is safe to do so. The
1565 * return value indicates whether a reschedule actually happened.
1566 * cond_resched_lock() will drop the spinlock before scheduling, and
1567 * cond_resched_softirq() will re-enable bottom halves (BHs) before scheduling.
1568 */
1569#ifndef CONFIG_PREEMPT
1570extern int _cond_resched(void);
1571#else
1572static inline int _cond_resched(void) { return 0; }
1573#endif
1574
1575#define cond_resched() ({                       \
1576        ___might_sleep(__FILE__, __LINE__, 0);  \
1577        _cond_resched();                        \
1578})
1579
1580extern int __cond_resched_lock(spinlock_t *lock);
1581
1582#define cond_resched_lock(lock) ({                              \
1583        ___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
1584        __cond_resched_lock(lock);                              \
1585})
1586
1587extern int __cond_resched_softirq(void);
1588
1589#define cond_resched_softirq() ({                                       \
1590        ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);     \
1591        __cond_resched_softirq();                                       \
1592})
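
/*
 * Example (illustrative only; 'big_list', 'item' and process_one_item()
 * are made-up names): a long kernel-side loop that voluntarily yields
 * between iterations:
 *
 *      list_for_each_entry(item, &big_list, node) {
 *              process_one_item(item);
 *              cond_resched();   // nearly free on CONFIG_PREEMPT kernels
 *      }
 *
 * cond_resched_lock(&lock) is the equivalent for loops that hold a
 * spinlock: it drops the lock, reschedules and retakes it when a
 * reschedule or lock break is due.
 */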
1593
1594static inline void cond_resched_rcu(void)
1595{
1596#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
1597        rcu_read_unlock();
1598        cond_resched();
1599        rcu_read_lock();
1600#endif
1601}
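
/*
 * Example (illustrative only; 'long_list', 'obj' and examine() are made-up
 * names): walking a long RCU-protected list without hogging the CPU.
 * cond_resched_rcu() may drop and re-acquire rcu_read_lock(), so pointers
 * obtained before the call must be treated as stale afterwards:
 *
 *      rcu_read_lock();
 *      list_for_each_entry_rcu(obj, &long_list, node) {
 *              examine(obj);
 *              cond_resched_rcu();
 *              // 'obj' may now be stale; revalidate before dereferencing further
 *      }
 *      rcu_read_unlock();
 */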
1602
1603/*
1604 * Does a critical section need to be broken because another task
1605 * is waiting? (Technically this does not depend on CONFIG_PREEMPT,
1606 * but rather expresses a general need for low latency.)
1607 */
1608static inline int spin_needbreak(spinlock_t *lock)
1609{
1610#ifdef CONFIG_PREEMPT
1611        return spin_is_contended(lock);
1612#else
1613        return 0;
1614#endif
1615}
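
/*
 * Example (illustrative only; more_work() and do_a_little_work() are
 * made-up helpers): breaking a long spinlocked section by hand when either
 * a waiter or a pending reschedule shows up; this is essentially what
 * __cond_resched_lock() automates:
 *
 *      spin_lock(&lock);
 *      while (more_work()) {
 *              do_a_little_work();
 *              if (spin_needbreak(&lock) || need_resched()) {
 *                      spin_unlock(&lock);
 *                      cond_resched();
 *                      spin_lock(&lock);
 *              }
 *      }
 *      spin_unlock(&lock);
 */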
1616
1617static __always_inline bool need_resched(void)
1618{
1619        return unlikely(tif_need_resched());
1620}
1621
1622/*
1623 * Wrappers for p->thread_info->cpu access. No-op on UP.
1624 */
1625#ifdef CONFIG_SMP
1626
1627static inline unsigned int task_cpu(const struct task_struct *p)
1628{
1629#ifdef CONFIG_THREAD_INFO_IN_TASK
1630        return p->cpu;
1631#else
1632        return task_thread_info(p)->cpu;
1633#endif
1634}
1635
1636extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
1637
1638#else
1639
1640static inline unsigned int task_cpu(const struct task_struct *p)
1641{
1642        return 0;
1643}
1644
1645static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
1646{
1647}
1648
1649#endif /* CONFIG_SMP */
1650
1651/*
1652 * In order to reduce various lock-holder preemption latencies, provide an
1653 * interface to check whether a vCPU is currently running or not.
1654 *
1655 * This allows us to terminate optimistic spin loops and block, analogous to
1656 * the native optimistic spin heuristic of testing if the lock owner task is
1657 * running or not.
1658 */
1659#ifndef vcpu_is_preempted
1660# define vcpu_is_preempted(cpu) false
1661#endif
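
/*
 * Example (illustrative only; owner_still_holds_lock() is a made-up
 * predicate): an optimistic spin loop that gives up when the lock owner's
 * (v)CPU is no longer running, in the spirit of mutex_spin_on_owner():
 *
 *      while (owner_still_holds_lock()) {
 *              if (need_resched() || vcpu_is_preempted(task_cpu(owner)))
 *                      break;          // stop wasting cycles, block instead
 *              cpu_relax();
 *      }
 */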
1662
1663extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
1664extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
1665
1666#ifndef TASK_SIZE_OF
1667#define TASK_SIZE_OF(tsk)       TASK_SIZE
1668#endif
1669
1670#endif /* _LINUX_SCHED_H */
1671