linux/kernel/workqueue.c
   1/*
   2 * kernel/workqueue.c - generic async execution with shared worker pool
   3 *
   4 * Copyright (C) 2002           Ingo Molnar
   5 *
   6 *   Derived from the taskqueue/keventd code by:
   7 *     David Woodhouse <dwmw2@infradead.org>
   8 *     Andrew Morton
   9 *     Kai Petzke <wpp@marie.physik.tu-berlin.de>
  10 *     Theodore Ts'o <tytso@mit.edu>
  11 *
  12 * Made to use alloc_percpu by Christoph Lameter.
  13 *
  14 * Copyright (C) 2010           SUSE Linux Products GmbH
  15 * Copyright (C) 2010           Tejun Heo <tj@kernel.org>
  16 *
   17 * This is the generic async execution mechanism.  Work items are
  18 * executed in process context.  The worker pool is shared and
  19 * automatically managed.  There are two worker pools for each CPU (one for
  20 * normal work items and the other for high priority ones) and some extra
  21 * pools for workqueues which are not bound to any specific CPU - the
  22 * number of these backing pools is dynamic.
  23 *
  24 * Please read Documentation/workqueue.txt for details.
  25 */
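/*
 * Illustrative usage sketch (not part of this file; my_work and
 * my_work_fn are hypothetical names): a typical user declares a work
 * item, points it at a handler and queues it on one of the system
 * workqueues defined further down.  schedule_work() queues on
 * system_wq and flush_work() waits for the handler to finish:
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("running in process context\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	schedule_work(&my_work);
 *	flush_work(&my_work);
 */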
  26
  27#include <linux/export.h>
  28#include <linux/kernel.h>
  29#include <linux/sched.h>
  30#include <linux/init.h>
  31#include <linux/signal.h>
  32#include <linux/completion.h>
  33#include <linux/workqueue.h>
  34#include <linux/slab.h>
  35#include <linux/cpu.h>
  36#include <linux/notifier.h>
  37#include <linux/kthread.h>
  38#include <linux/hardirq.h>
  39#include <linux/mempolicy.h>
  40#include <linux/freezer.h>
  41#include <linux/kallsyms.h>
  42#include <linux/debug_locks.h>
  43#include <linux/lockdep.h>
  44#include <linux/idr.h>
  45#include <linux/jhash.h>
  46#include <linux/hashtable.h>
  47#include <linux/rculist.h>
  48#include <linux/nodemask.h>
  49#include <linux/moduleparam.h>
  50#include <linux/uaccess.h>
  51
  52#include "workqueue_internal.h"
  53
  54enum {
  55        /*
  56         * worker_pool flags
  57         *
  58         * A bound pool is either associated or disassociated with its CPU.
  59         * While associated (!DISASSOCIATED), all workers are bound to the
  60         * CPU and none has %WORKER_UNBOUND set and concurrency management
  61         * is in effect.
  62         *
  63         * While DISASSOCIATED, the cpu may be offline and all workers have
  64         * %WORKER_UNBOUND set and concurrency management disabled, and may
  65         * be executing on any CPU.  The pool behaves as an unbound one.
  66         *
  67         * Note that DISASSOCIATED should be flipped only while holding
  68         * attach_mutex to avoid changing binding state while
  69         * worker_attach_to_pool() is in progress.
  70         */
  71        POOL_DISASSOCIATED      = 1 << 2,       /* cpu can't serve workers */
  72
  73        /* worker flags */
  74        WORKER_DIE              = 1 << 1,       /* die die die */
  75        WORKER_IDLE             = 1 << 2,       /* is idle */
  76        WORKER_PREP             = 1 << 3,       /* preparing to run works */
  77        WORKER_CPU_INTENSIVE    = 1 << 6,       /* cpu intensive */
  78        WORKER_UNBOUND          = 1 << 7,       /* worker is unbound */
  79        WORKER_REBOUND          = 1 << 8,       /* worker was rebound */
  80
  81        WORKER_NOT_RUNNING      = WORKER_PREP | WORKER_CPU_INTENSIVE |
  82                                  WORKER_UNBOUND | WORKER_REBOUND,
  83
  84        NR_STD_WORKER_POOLS     = 2,            /* # standard pools per cpu */
  85
  86        UNBOUND_POOL_HASH_ORDER = 6,            /* hashed by pool->attrs */
  87        BUSY_WORKER_HASH_ORDER  = 6,            /* 64 pointers */
  88
  89        MAX_IDLE_WORKERS_RATIO  = 4,            /* 1/4 of busy can be idle */
  90        IDLE_WORKER_TIMEOUT     = 300 * HZ,     /* keep idle ones for 5 mins */
  91
  92        MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
  93                                                /* call for help after 10ms
  94                                                   (min two ticks) */
  95        MAYDAY_INTERVAL         = HZ / 10,      /* and then every 100ms */
   96        CREATE_COOLDOWN         = HZ,           /* time to breathe after fail */
  97
  98        /*
  99         * Rescue workers are used only on emergencies and shared by
 100         * all cpus.  Give MIN_NICE.
 101         */
 102        RESCUER_NICE_LEVEL      = MIN_NICE,
 103        HIGHPRI_NICE_LEVEL      = MIN_NICE,
 104
 105        WQ_NAME_LEN             = 24,
 106};
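/*
 * Worked example for MAYDAY_INITIAL_TIMEOUT above (illustrative; the
 * result depends on the configured HZ): the expression picks HZ / 100
 * but never less than two ticks.  With HZ == 1000 that is 10 ticks
 * (10ms); with HZ == 100, HZ / 100 == 1 falls below the minimum and
 * the value clamps to 2 ticks (20ms).
 */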
 107
 108/*
 109 * Structure fields follow one of the following exclusion rules.
 110 *
 111 * I: Modifiable by initialization/destruction paths and read-only for
 112 *    everyone else.
 113 *
 114 * P: Preemption protected.  Disabling preemption is enough and should
 115 *    only be modified and accessed from the local cpu.
 116 *
 117 * L: pool->lock protected.  Access with pool->lock held.
 118 *
 119 * X: During normal operation, modification requires pool->lock and should
 120 *    be done only from local cpu.  Either disabling preemption on local
 121 *    cpu or grabbing pool->lock is enough for read access.  If
 122 *    POOL_DISASSOCIATED is set, it's identical to L.
 123 *
 124 * A: pool->attach_mutex protected.
 125 *
 126 * PL: wq_pool_mutex protected.
 127 *
 128 * PR: wq_pool_mutex protected for writes.  Sched-RCU protected for reads.
 129 *
 130 * WQ: wq->mutex protected.
 131 *
 132 * WR: wq->mutex protected for writes.  Sched-RCU protected for reads.
 133 *
 134 * MD: wq_mayday_lock protected.
 135 */
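/*
 * Illustrative sketch of the sched-RCU read side implied by the PR/WR
 * rules above (use() is a hypothetical helper; the object may only be
 * used inside the read-side critical section):
 *
 *	rcu_read_lock_sched();
 *	pwq = unbound_pwq_by_node(wq, node);
 *	if (pwq)
 *		use(pwq);
 *	rcu_read_unlock_sched();
 */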
 136
 137/* struct worker is defined in workqueue_internal.h */
 138
 139struct worker_pool {
 140        spinlock_t              lock;           /* the pool lock */
 141        int                     cpu;            /* I: the associated cpu */
 142        int                     node;           /* I: the associated node ID */
 143        int                     id;             /* I: pool ID */
 144        unsigned int            flags;          /* X: flags */
 145
 146        struct list_head        worklist;       /* L: list of pending works */
 147        int                     nr_workers;     /* L: total number of workers */
 148
 149        /* nr_idle includes the ones off idle_list for rebinding */
 150        int                     nr_idle;        /* L: currently idle ones */
 151
 152        struct list_head        idle_list;      /* X: list of idle workers */
 153        struct timer_list       idle_timer;     /* L: worker idle timeout */
 154        struct timer_list       mayday_timer;   /* L: SOS timer for workers */
 155
  156        /* a worker is either on busy_hash or idle_list, or the manager */
 157        DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
 158                                                /* L: hash of busy workers */
 159
 160        /* see manage_workers() for details on the two manager mutexes */
 161        struct mutex            manager_arb;    /* manager arbitration */
 162        struct mutex            attach_mutex;   /* attach/detach exclusion */
 163        struct list_head        workers;        /* A: attached workers */
 164        struct completion       *detach_completion; /* all workers detached */
 165
 166        struct ida              worker_ida;     /* worker IDs for task name */
 167
 168        struct workqueue_attrs  *attrs;         /* I: worker attributes */
 169        struct hlist_node       hash_node;      /* PL: unbound_pool_hash node */
 170        int                     refcnt;         /* PL: refcnt for unbound pools */
 171
 172        /*
 173         * The current concurrency level.  As it's likely to be accessed
 174         * from other CPUs during try_to_wake_up(), put it in a separate
 175         * cacheline.
 176         */
 177        atomic_t                nr_running ____cacheline_aligned_in_smp;
 178
 179        /*
 180         * Destruction of pool is sched-RCU protected to allow dereferences
 181         * from get_work_pool().
 182         */
 183        struct rcu_head         rcu;
 184} ____cacheline_aligned_in_smp;
 185
 186/*
 187 * The per-pool workqueue.  While queued, the lower WORK_STRUCT_FLAG_BITS
 188 * of work_struct->data are used for flags and the remaining high bits
  189 * point to the pwq; thus, pwqs need to be aligned on a boundary of
  190 * 1 << WORK_STRUCT_FLAG_BITS (two to the power of the number of flag bits).
 191 */
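/*
 * Illustrative sketch of that encoding (see set_work_pwq() and
 * get_work_pwq() below for the real helpers): the alignment keeps the
 * low WORK_STRUCT_FLAG_BITS of a pwq address clear, so they can carry
 * flags while the high bits still recover the pointer:
 *
 *	data = (unsigned long)pwq | WORK_STRUCT_PENDING | WORK_STRUCT_PWQ;
 *	pwq  = (struct pool_workqueue *)(data & WORK_STRUCT_WQ_DATA_MASK);
 */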
 192struct pool_workqueue {
 193        struct worker_pool      *pool;          /* I: the associated pool */
 194        struct workqueue_struct *wq;            /* I: the owning workqueue */
 195        int                     work_color;     /* L: current color */
 196        int                     flush_color;    /* L: flushing color */
 197        int                     refcnt;         /* L: reference count */
 198        int                     nr_in_flight[WORK_NR_COLORS];
 199                                                /* L: nr of in_flight works */
 200        int                     nr_active;      /* L: nr of active works */
 201        int                     max_active;     /* L: max active works */
 202        struct list_head        delayed_works;  /* L: delayed works */
 203        struct list_head        pwqs_node;      /* WR: node on wq->pwqs */
 204        struct list_head        mayday_node;    /* MD: node on wq->maydays */
 205
 206        /*
 207         * Release of unbound pwq is punted to system_wq.  See put_pwq()
 208         * and pwq_unbound_release_workfn() for details.  pool_workqueue
 209         * itself is also sched-RCU protected so that the first pwq can be
 210         * determined without grabbing wq->mutex.
 211         */
 212        struct work_struct      unbound_release_work;
 213        struct rcu_head         rcu;
 214} __aligned(1 << WORK_STRUCT_FLAG_BITS);
 215
 216/*
 217 * Structure used to wait for workqueue flush.
 218 */
 219struct wq_flusher {
 220        struct list_head        list;           /* WQ: list of flushers */
 221        int                     flush_color;    /* WQ: flush color waiting for */
 222        struct completion       done;           /* flush completion */
 223};
 224
 225struct wq_device;
 226
 227/*
 228 * The externally visible workqueue.  It relays the issued work items to
 229 * the appropriate worker_pool through its pool_workqueues.
 230 */
 231struct workqueue_struct {
 232        struct list_head        pwqs;           /* WR: all pwqs of this wq */
 233        struct list_head        list;           /* PL: list of all workqueues */
 234
 235        struct mutex            mutex;          /* protects this wq */
 236        int                     work_color;     /* WQ: current work color */
 237        int                     flush_color;    /* WQ: current flush color */
 238        atomic_t                nr_pwqs_to_flush; /* flush in progress */
 239        struct wq_flusher       *first_flusher; /* WQ: first flusher */
 240        struct list_head        flusher_queue;  /* WQ: flush waiters */
 241        struct list_head        flusher_overflow; /* WQ: flush overflow list */
 242
 243        struct list_head        maydays;        /* MD: pwqs requesting rescue */
 244        struct worker           *rescuer;       /* I: rescue worker */
 245
 246        int                     nr_drainers;    /* WQ: drain in progress */
 247        int                     saved_max_active; /* WQ: saved pwq max_active */
 248
 249        struct workqueue_attrs  *unbound_attrs; /* WQ: only for unbound wqs */
 250        struct pool_workqueue   *dfl_pwq;       /* WQ: only for unbound wqs */
 251
 252#ifdef CONFIG_SYSFS
 253        struct wq_device        *wq_dev;        /* I: for sysfs interface */
 254#endif
 255#ifdef CONFIG_LOCKDEP
 256        struct lockdep_map      lockdep_map;
 257#endif
 258        char                    name[WQ_NAME_LEN]; /* I: workqueue name */
 259
 260        /* hot fields used during command issue, aligned to cacheline */
 261        unsigned int            flags ____cacheline_aligned; /* WQ: WQ_* flags */
 262        struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwqs */
 263        struct pool_workqueue __rcu *numa_pwq_tbl[]; /* FR: unbound pwqs indexed by node */
 264};
 265
 266static struct kmem_cache *pwq_cache;
 267
 268static cpumask_var_t *wq_numa_possible_cpumask;
 269                                        /* possible CPUs of each node */
 270
 271static bool wq_disable_numa;
 272module_param_named(disable_numa, wq_disable_numa, bool, 0444);
 273
 274/* see the comment above the definition of WQ_POWER_EFFICIENT */
 275#ifdef CONFIG_WQ_POWER_EFFICIENT_DEFAULT
 276static bool wq_power_efficient = true;
 277#else
 278static bool wq_power_efficient;
 279#endif
 280
 281module_param_named(power_efficient, wq_power_efficient, bool, 0444);
 282
 283static bool wq_numa_enabled;            /* unbound NUMA affinity enabled */
 284
 285/* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
 286static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
 287
 288static DEFINE_MUTEX(wq_pool_mutex);     /* protects pools and workqueues list */
 289static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
 290
 291static LIST_HEAD(workqueues);           /* PL: list of all workqueues */
 292static bool workqueue_freezing;         /* PL: have wqs started freezing? */
 293
 294/* the per-cpu worker pools */
 295static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
 296                                     cpu_worker_pools);
 297
 298static DEFINE_IDR(worker_pool_idr);     /* PR: idr of all pools */
 299
 300/* PL: hash of all unbound pools keyed by pool->attrs */
 301static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
 302
 303/* I: attributes used when instantiating standard unbound pools on demand */
 304static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
 305
 306/* I: attributes used when instantiating ordered pools on demand */
 307static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
 308
 309struct workqueue_struct *system_wq __read_mostly;
 310EXPORT_SYMBOL(system_wq);
 311struct workqueue_struct *system_highpri_wq __read_mostly;
 312EXPORT_SYMBOL_GPL(system_highpri_wq);
 313struct workqueue_struct *system_long_wq __read_mostly;
 314EXPORT_SYMBOL_GPL(system_long_wq);
 315struct workqueue_struct *system_unbound_wq __read_mostly;
 316EXPORT_SYMBOL_GPL(system_unbound_wq);
 317struct workqueue_struct *system_freezable_wq __read_mostly;
 318EXPORT_SYMBOL_GPL(system_freezable_wq);
 319struct workqueue_struct *system_power_efficient_wq __read_mostly;
 320EXPORT_SYMBOL_GPL(system_power_efficient_wq);
 321struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
 322EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
 323
 324static int worker_thread(void *__worker);
 325static void copy_workqueue_attrs(struct workqueue_attrs *to,
 326                                 const struct workqueue_attrs *from);
 327
 328#define CREATE_TRACE_POINTS
 329#include <trace/events/workqueue.h>
 330
 331#define assert_rcu_or_pool_mutex()                                      \
 332        rcu_lockdep_assert(rcu_read_lock_sched_held() ||                \
 333                           lockdep_is_held(&wq_pool_mutex),             \
 334                           "sched RCU or wq_pool_mutex should be held")
 335
 336#define assert_rcu_or_wq_mutex(wq)                                      \
 337        rcu_lockdep_assert(rcu_read_lock_sched_held() ||                \
 338                           lockdep_is_held(&wq->mutex),                 \
 339                           "sched RCU or wq->mutex should be held")
 340
 341#define for_each_cpu_worker_pool(pool, cpu)                             \
 342        for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];               \
 343             (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
 344             (pool)++)
 345
 346/**
 347 * for_each_pool - iterate through all worker_pools in the system
 348 * @pool: iteration cursor
 349 * @pi: integer used for iteration
 350 *
 351 * This must be called either with wq_pool_mutex held or sched RCU read
 352 * locked.  If the pool needs to be used beyond the locking in effect, the
 353 * caller is responsible for guaranteeing that the pool stays online.
 354 *
 355 * The if/else clause exists only for the lockdep assertion and can be
 356 * ignored.
 357 */
 358#define for_each_pool(pool, pi)                                         \
 359        idr_for_each_entry(&worker_pool_idr, pool, pi)                  \
 360                if (({ assert_rcu_or_pool_mutex(); false; })) { }       \
 361                else
 362
 363/**
 364 * for_each_pool_worker - iterate through all workers of a worker_pool
 365 * @worker: iteration cursor
 366 * @pool: worker_pool to iterate workers of
 367 *
  368 * This must be called with @pool->attach_mutex held.
 369 *
 370 * The if/else clause exists only for the lockdep assertion and can be
 371 * ignored.
 372 */
 373#define for_each_pool_worker(worker, pool)                              \
 374        list_for_each_entry((worker), &(pool)->workers, node)           \
 375                if (({ lockdep_assert_held(&pool->attach_mutex); false; })) { } \
 376                else
 377
 378/**
 379 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
 380 * @pwq: iteration cursor
 381 * @wq: the target workqueue
 382 *
 383 * This must be called either with wq->mutex held or sched RCU read locked.
 384 * If the pwq needs to be used beyond the locking in effect, the caller is
 385 * responsible for guaranteeing that the pwq stays online.
 386 *
 387 * The if/else clause exists only for the lockdep assertion and can be
 388 * ignored.
 389 */
 390#define for_each_pwq(pwq, wq)                                           \
 391        list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node)          \
 392                if (({ assert_rcu_or_wq_mutex(wq); false; })) { }       \
 393                else
 394
 395#ifdef CONFIG_DEBUG_OBJECTS_WORK
 396
 397static struct debug_obj_descr work_debug_descr;
 398
 399static void *work_debug_hint(void *addr)
 400{
 401        return ((struct work_struct *) addr)->func;
 402}
 403
 404/*
 405 * fixup_init is called when:
 406 * - an active object is initialized
 407 */
 408static int work_fixup_init(void *addr, enum debug_obj_state state)
 409{
 410        struct work_struct *work = addr;
 411
 412        switch (state) {
 413        case ODEBUG_STATE_ACTIVE:
 414                cancel_work_sync(work);
 415                debug_object_init(work, &work_debug_descr);
 416                return 1;
 417        default:
 418                return 0;
 419        }
 420}
 421
 422/*
 423 * fixup_activate is called when:
 424 * - an active object is activated
 425 * - an unknown object is activated (might be a statically initialized object)
 426 */
 427static int work_fixup_activate(void *addr, enum debug_obj_state state)
 428{
 429        struct work_struct *work = addr;
 430
 431        switch (state) {
 432
 433        case ODEBUG_STATE_NOTAVAILABLE:
 434                /*
 435                 * This is not really a fixup. The work struct was
 436                 * statically initialized. We just make sure that it
 437                 * is tracked in the object tracker.
 438                 */
 439                if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
 440                        debug_object_init(work, &work_debug_descr);
 441                        debug_object_activate(work, &work_debug_descr);
 442                        return 0;
 443                }
 444                WARN_ON_ONCE(1);
 445                return 0;
 446
 447        case ODEBUG_STATE_ACTIVE:
 448                WARN_ON(1);
 449
 450        default:
 451                return 0;
 452        }
 453}
 454
 455/*
 456 * fixup_free is called when:
 457 * - an active object is freed
 458 */
 459static int work_fixup_free(void *addr, enum debug_obj_state state)
 460{
 461        struct work_struct *work = addr;
 462
 463        switch (state) {
 464        case ODEBUG_STATE_ACTIVE:
 465                cancel_work_sync(work);
 466                debug_object_free(work, &work_debug_descr);
 467                return 1;
 468        default:
 469                return 0;
 470        }
 471}
 472
 473static struct debug_obj_descr work_debug_descr = {
 474        .name           = "work_struct",
 475        .debug_hint     = work_debug_hint,
 476        .fixup_init     = work_fixup_init,
 477        .fixup_activate = work_fixup_activate,
 478        .fixup_free     = work_fixup_free,
 479};
 480
 481static inline void debug_work_activate(struct work_struct *work)
 482{
 483        debug_object_activate(work, &work_debug_descr);
 484}
 485
 486static inline void debug_work_deactivate(struct work_struct *work)
 487{
 488        debug_object_deactivate(work, &work_debug_descr);
 489}
 490
 491void __init_work(struct work_struct *work, int onstack)
 492{
 493        if (onstack)
 494                debug_object_init_on_stack(work, &work_debug_descr);
 495        else
 496                debug_object_init(work, &work_debug_descr);
 497}
 498EXPORT_SYMBOL_GPL(__init_work);
 499
 500void destroy_work_on_stack(struct work_struct *work)
 501{
 502        debug_object_free(work, &work_debug_descr);
 503}
 504EXPORT_SYMBOL_GPL(destroy_work_on_stack);
 505
 506void destroy_delayed_work_on_stack(struct delayed_work *work)
 507{
 508        destroy_timer_on_stack(&work->timer);
 509        debug_object_free(&work->work, &work_debug_descr);
 510}
 511EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);
 512
 513#else
 514static inline void debug_work_activate(struct work_struct *work) { }
 515static inline void debug_work_deactivate(struct work_struct *work) { }
 516#endif
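/*
 * Illustrative sketch (hypothetical function, not part of this file):
 * an on-stack work item should be paired with INIT_WORK_ONSTACK() and
 * destroy_work_on_stack() so that debugobjects can track its lifetime:
 *
 *	void my_sync_op(void)
 *	{
 *		struct work_struct work;
 *
 *		INIT_WORK_ONSTACK(&work, my_work_fn);
 *		schedule_work(&work);
 *		flush_work(&work);
 *		destroy_work_on_stack(&work);
 *	}
 */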
 517
 518/**
  519 * worker_pool_assign_id - allocate ID and assign it to @pool
 520 * @pool: the pool pointer of interest
 521 *
 522 * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
 523 * successfully, -errno on failure.
 524 */
 525static int worker_pool_assign_id(struct worker_pool *pool)
 526{
 527        int ret;
 528
 529        lockdep_assert_held(&wq_pool_mutex);
 530
 531        ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
 532                        GFP_KERNEL);
 533        if (ret >= 0) {
 534                pool->id = ret;
 535                return 0;
 536        }
 537        return ret;
 538}
 539
 540/**
 541 * unbound_pwq_by_node - return the unbound pool_workqueue for the given node
 542 * @wq: the target workqueue
 543 * @node: the node ID
 544 *
  545 * This must be called either with wq->mutex held or sched RCU read locked.
 546 * If the pwq needs to be used beyond the locking in effect, the caller is
 547 * responsible for guaranteeing that the pwq stays online.
 548 *
 549 * Return: The unbound pool_workqueue for @node.
 550 */
 551static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
 552                                                  int node)
 553{
 554        assert_rcu_or_wq_mutex(wq);
 555        return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
 556}
 557
 558static unsigned int work_color_to_flags(int color)
 559{
 560        return color << WORK_STRUCT_COLOR_SHIFT;
 561}
 562
 563static int get_work_color(struct work_struct *work)
 564{
 565        return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
 566                ((1 << WORK_STRUCT_COLOR_BITS) - 1);
 567}
 568
 569static int work_next_color(int color)
 570{
 571        return (color + 1) % WORK_NR_COLORS;
 572}
 573
 574/*
 575 * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
 576 * contain the pointer to the queued pwq.  Once execution starts, the flag
 577 * is cleared and the high bits contain OFFQ flags and pool ID.
 578 *
 579 * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
 580 * and clear_work_data() can be used to set the pwq, pool or clear
 581 * work->data.  These functions should only be called while the work is
 582 * owned - ie. while the PENDING bit is set.
 583 *
 584 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
 585 * corresponding to a work.  Pool is available once the work has been
 586 * queued anywhere after initialization until it is sync canceled.  pwq is
 587 * available only while the work item is queued.
 588 *
 589 * %WORK_OFFQ_CANCELING is used to mark a work item which is being
 590 * canceled.  While being canceled, a work item may have its PENDING set
 591 * but stay off timer and worklist for arbitrarily long and nobody should
 592 * try to steal the PENDING bit.
 593 */
 594static inline void set_work_data(struct work_struct *work, unsigned long data,
 595                                 unsigned long flags)
 596{
 597        WARN_ON_ONCE(!work_pending(work));
 598        atomic_long_set(&work->data, data | flags | work_static(work));
 599}
 600
 601static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
 602                         unsigned long extra_flags)
 603{
 604        set_work_data(work, (unsigned long)pwq,
 605                      WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
 606}
 607
 608static void set_work_pool_and_keep_pending(struct work_struct *work,
 609                                           int pool_id)
 610{
 611        set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
 612                      WORK_STRUCT_PENDING);
 613}
 614
 615static void set_work_pool_and_clear_pending(struct work_struct *work,
 616                                            int pool_id)
 617{
 618        /*
 619         * The following wmb is paired with the implied mb in
 620         * test_and_set_bit(PENDING) and ensures all updates to @work made
 621         * here are visible to and precede any updates by the next PENDING
 622         * owner.
 623         */
 624        smp_wmb();
 625        set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
 626}
 627
 628static void clear_work_data(struct work_struct *work)
 629{
 630        smp_wmb();      /* see set_work_pool_and_clear_pending() */
 631        set_work_data(work, WORK_STRUCT_NO_POOL, 0);
 632}
 633
 634static struct pool_workqueue *get_work_pwq(struct work_struct *work)
 635{
 636        unsigned long data = atomic_long_read(&work->data);
 637
 638        if (data & WORK_STRUCT_PWQ)
 639                return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
 640        else
 641                return NULL;
 642}
 643
 644/**
 645 * get_work_pool - return the worker_pool a given work was associated with
 646 * @work: the work item of interest
 647 *
  648 * Pools are created and destroyed under wq_pool_mutex, and allow read
 649 * access under sched-RCU read lock.  As such, this function should be
 650 * called under wq_pool_mutex or with preemption disabled.
 651 *
 652 * All fields of the returned pool are accessible as long as the above
 653 * mentioned locking is in effect.  If the returned pool needs to be used
 654 * beyond the critical section, the caller is responsible for ensuring the
 655 * returned pool is and stays online.
 656 *
 657 * Return: The worker_pool @work was last associated with.  %NULL if none.
 658 */
 659static struct worker_pool *get_work_pool(struct work_struct *work)
 660{
 661        unsigned long data = atomic_long_read(&work->data);
 662        int pool_id;
 663
 664        assert_rcu_or_pool_mutex();
 665
 666        if (data & WORK_STRUCT_PWQ)
 667                return ((struct pool_workqueue *)
 668                        (data & WORK_STRUCT_WQ_DATA_MASK))->pool;
 669
 670        pool_id = data >> WORK_OFFQ_POOL_SHIFT;
 671        if (pool_id == WORK_OFFQ_POOL_NONE)
 672                return NULL;
 673
 674        return idr_find(&worker_pool_idr, pool_id);
 675}
 676
 677/**
 678 * get_work_pool_id - return the worker pool ID a given work is associated with
 679 * @work: the work item of interest
 680 *
 681 * Return: The worker_pool ID @work was last associated with.
 682 * %WORK_OFFQ_POOL_NONE if none.
 683 */
 684static int get_work_pool_id(struct work_struct *work)
 685{
 686        unsigned long data = atomic_long_read(&work->data);
 687
 688        if (data & WORK_STRUCT_PWQ)
 689                return ((struct pool_workqueue *)
 690                        (data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;
 691
 692        return data >> WORK_OFFQ_POOL_SHIFT;
 693}
 694
 695static void mark_work_canceling(struct work_struct *work)
 696{
 697        unsigned long pool_id = get_work_pool_id(work);
 698
 699        pool_id <<= WORK_OFFQ_POOL_SHIFT;
 700        set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
 701}
 702
 703static bool work_is_canceling(struct work_struct *work)
 704{
 705        unsigned long data = atomic_long_read(&work->data);
 706
 707        return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
 708}
 709
 710/*
 711 * Policy functions.  These define the policies on how the global worker
 712 * pools are managed.  Unless noted otherwise, these functions assume that
 713 * they're being called with pool->lock held.
 714 */
 715
 716static bool __need_more_worker(struct worker_pool *pool)
 717{
 718        return !atomic_read(&pool->nr_running);
 719}
 720
 721/*
 722 * Need to wake up a worker?  Called from anything but currently
 723 * running workers.
 724 *
 725 * Note that, because unbound workers never contribute to nr_running, this
 726 * function will always return %true for unbound pools as long as the
 727 * worklist isn't empty.
 728 */
 729static bool need_more_worker(struct worker_pool *pool)
 730{
 731        return !list_empty(&pool->worklist) && __need_more_worker(pool);
 732}
 733
 734/* Can I start working?  Called from busy but !running workers. */
 735static bool may_start_working(struct worker_pool *pool)
 736{
 737        return pool->nr_idle;
 738}
 739
 740/* Do I need to keep working?  Called from currently running workers. */
 741static bool keep_working(struct worker_pool *pool)
 742{
 743        return !list_empty(&pool->worklist) &&
 744                atomic_read(&pool->nr_running) <= 1;
 745}
 746
 747/* Do we need a new worker?  Called from manager. */
 748static bool need_to_create_worker(struct worker_pool *pool)
 749{
 750        return need_more_worker(pool) && !may_start_working(pool);
 751}
 752
 753/* Do we have too many workers and should some go away? */
 754static bool too_many_workers(struct worker_pool *pool)
 755{
 756        bool managing = mutex_is_locked(&pool->manager_arb);
 757        int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
 758        int nr_busy = pool->nr_workers - nr_idle;
 759
 760        return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
 761}
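/*
 * Worked example for too_many_workers() (illustrative): with
 * MAX_IDLE_WORKERS_RATIO == 4 and 12 busy workers, nr_idle == 4 is
 * still acceptable ((4 - 2) * 4 == 8 < 12) while nr_idle == 5 trips
 * the check ((5 - 2) * 4 == 12 >= 12) and the idle timer may start
 * trimming idle workers.
 */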
 762
 763/*
 764 * Wake up functions.
 765 */
 766
 767/* Return the first idle worker.  Safe with preemption disabled */
 768static struct worker *first_idle_worker(struct worker_pool *pool)
 769{
 770        if (unlikely(list_empty(&pool->idle_list)))
 771                return NULL;
 772
 773        return list_first_entry(&pool->idle_list, struct worker, entry);
 774}
 775
 776/**
 777 * wake_up_worker - wake up an idle worker
 778 * @pool: worker pool to wake worker from
 779 *
 780 * Wake up the first idle worker of @pool.
 781 *
 782 * CONTEXT:
 783 * spin_lock_irq(pool->lock).
 784 */
 785static void wake_up_worker(struct worker_pool *pool)
 786{
 787        struct worker *worker = first_idle_worker(pool);
 788
 789        if (likely(worker))
 790                wake_up_process(worker->task);
 791}
 792
 793/**
 794 * wq_worker_waking_up - a worker is waking up
 795 * @task: task waking up
 796 * @cpu: CPU @task is waking up to
 797 *
 798 * This function is called during try_to_wake_up() when a worker is
 799 * being awoken.
 800 *
 801 * CONTEXT:
 802 * spin_lock_irq(rq->lock)
 803 */
 804void wq_worker_waking_up(struct task_struct *task, int cpu)
 805{
 806        struct worker *worker = kthread_data(task);
 807
 808        if (!(worker->flags & WORKER_NOT_RUNNING)) {
 809                WARN_ON_ONCE(worker->pool->cpu != cpu);
 810                atomic_inc(&worker->pool->nr_running);
 811        }
 812}
 813
 814/**
 815 * wq_worker_sleeping - a worker is going to sleep
 816 * @task: task going to sleep
 817 * @cpu: CPU in question, must be the current CPU number
 818 *
 819 * This function is called during schedule() when a busy worker is
  820 * going to sleep.  Another worker on the same cpu can be woken up by
  821 * returning a pointer to its task.
 822 *
 823 * CONTEXT:
 824 * spin_lock_irq(rq->lock)
 825 *
 826 * Return:
 827 * Worker task on @cpu to wake up, %NULL if none.
 828 */
 829struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
 830{
 831        struct worker *worker = kthread_data(task), *to_wakeup = NULL;
 832        struct worker_pool *pool;
 833
 834        /*
 835         * Rescuers, which may not have all the fields set up like normal
 836         * workers, also reach here, let's not access anything before
 837         * checking NOT_RUNNING.
 838         */
 839        if (worker->flags & WORKER_NOT_RUNNING)
 840                return NULL;
 841
 842        pool = worker->pool;
 843
 844        /* this can only happen on the local cpu */
 845        if (WARN_ON_ONCE(cpu != raw_smp_processor_id() || pool->cpu != cpu))
 846                return NULL;
 847
 848        /*
 849         * The counterpart of the following dec_and_test, implied mb,
 850         * worklist not empty test sequence is in insert_work().
 851         * Please read comment there.
 852         *
 853         * NOT_RUNNING is clear.  This means that we're bound to and
 854         * running on the local cpu w/ rq lock held and preemption
  855         * disabled, which in turn means that nobody else could be
 856         * manipulating idle_list, so dereferencing idle_list without pool
 857         * lock is safe.
 858         */
 859        if (atomic_dec_and_test(&pool->nr_running) &&
 860            !list_empty(&pool->worklist))
 861                to_wakeup = first_idle_worker(pool);
 862        return to_wakeup ? to_wakeup->task : NULL;
 863}
 864
 865/**
 866 * worker_set_flags - set worker flags and adjust nr_running accordingly
 867 * @worker: self
 868 * @flags: flags to set
 869 *
 870 * Set @flags in @worker->flags and adjust nr_running accordingly.
 871 *
 872 * CONTEXT:
 873 * spin_lock_irq(pool->lock)
 874 */
 875static inline void worker_set_flags(struct worker *worker, unsigned int flags)
 876{
 877        struct worker_pool *pool = worker->pool;
 878
 879        WARN_ON_ONCE(worker->task != current);
 880
 881        /* If transitioning into NOT_RUNNING, adjust nr_running. */
 882        if ((flags & WORKER_NOT_RUNNING) &&
 883            !(worker->flags & WORKER_NOT_RUNNING)) {
 884                atomic_dec(&pool->nr_running);
 885        }
 886
 887        worker->flags |= flags;
 888}
 889
 890/**
 891 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
 892 * @worker: self
 893 * @flags: flags to clear
 894 *
 895 * Clear @flags in @worker->flags and adjust nr_running accordingly.
 896 *
 897 * CONTEXT:
 898 * spin_lock_irq(pool->lock)
 899 */
 900static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
 901{
 902        struct worker_pool *pool = worker->pool;
 903        unsigned int oflags = worker->flags;
 904
 905        WARN_ON_ONCE(worker->task != current);
 906
 907        worker->flags &= ~flags;
 908
 909        /*
 910         * If transitioning out of NOT_RUNNING, increment nr_running.  Note
 911         * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is mask
 912         * of multiple flags, not a single flag.
 913         */
 914        if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
 915                if (!(worker->flags & WORKER_NOT_RUNNING))
 916                        atomic_inc(&pool->nr_running);
 917}
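/*
 * Illustrative example for the nesting note above: a worker carrying
 * both WORKER_PREP and WORKER_UNBOUND is inside WORKER_NOT_RUNNING on
 * two counts; worker_clr_flags(worker, WORKER_UNBOUND) must not bump
 * nr_running because WORKER_PREP is still set, which is exactly what
 * the re-check of worker->flags above handles.
 */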
 918
 919/**
 920 * find_worker_executing_work - find worker which is executing a work
 921 * @pool: pool of interest
 922 * @work: work to find worker for
 923 *
 924 * Find a worker which is executing @work on @pool by searching
 925 * @pool->busy_hash which is keyed by the address of @work.  For a worker
 926 * to match, its current execution should match the address of @work and
 927 * its work function.  This is to avoid unwanted dependency between
 928 * unrelated work executions through a work item being recycled while still
 929 * being executed.
 930 *
 931 * This is a bit tricky.  A work item may be freed once its execution
 932 * starts and nothing prevents the freed area from being recycled for
 933 * another work item.  If the same work item address ends up being reused
 934 * before the original execution finishes, workqueue will identify the
 935 * recycled work item as currently executing and make it wait until the
 936 * current execution finishes, introducing an unwanted dependency.
 937 *
 938 * This function checks the work item address and work function to avoid
 939 * false positives.  Note that this isn't complete as one may construct a
 940 * work function which can introduce dependency onto itself through a
 941 * recycled work item.  Well, if somebody wants to shoot oneself in the
 942 * foot that badly, there's only so much we can do, and if such deadlock
 943 * actually occurs, it should be easy to locate the culprit work function.
 944 *
 945 * CONTEXT:
 946 * spin_lock_irq(pool->lock).
 947 *
 948 * Return:
 949 * Pointer to worker which is executing @work if found, %NULL
 950 * otherwise.
 951 */
 952static struct worker *find_worker_executing_work(struct worker_pool *pool,
 953                                                 struct work_struct *work)
 954{
 955        struct worker *worker;
 956
 957        hash_for_each_possible(pool->busy_hash, worker, hentry,
 958                               (unsigned long)work)
 959                if (worker->current_work == work &&
 960                    worker->current_func == work->func)
 961                        return worker;
 962
 963        return NULL;
 964}
 965
 966/**
 967 * move_linked_works - move linked works to a list
 968 * @work: start of series of works to be scheduled
 969 * @head: target list to append @work to
  970 * @nextp: out parameter for nested worklist walking
 971 *
 972 * Schedule linked works starting from @work to @head.  Work series to
 973 * be scheduled starts at @work and includes any consecutive work with
 974 * WORK_STRUCT_LINKED set in its predecessor.
 975 *
 976 * If @nextp is not NULL, it's updated to point to the next work of
 977 * the last scheduled work.  This allows move_linked_works() to be
 978 * nested inside outer list_for_each_entry_safe().
 979 *
 980 * CONTEXT:
 981 * spin_lock_irq(pool->lock).
 982 */
 983static void move_linked_works(struct work_struct *work, struct list_head *head,
 984                              struct work_struct **nextp)
 985{
 986        struct work_struct *n;
 987
 988        /*
 989         * Linked worklist will always end before the end of the list,
 990         * use NULL for list head.
 991         */
 992        list_for_each_entry_safe_from(work, n, NULL, entry) {
 993                list_move_tail(&work->entry, head);
 994                if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
 995                        break;
 996        }
 997
 998        /*
 999         * If we're already inside safe list traversal and have moved
1000         * multiple works to the scheduled queue, the next position
1001         * needs to be updated.
1002         */
1003        if (nextp)
1004                *nextp = n;
1005}
1006
1007/**
1008 * get_pwq - get an extra reference on the specified pool_workqueue
1009 * @pwq: pool_workqueue to get
1010 *
1011 * Obtain an extra reference on @pwq.  The caller should guarantee that
1012 * @pwq has positive refcnt and be holding the matching pool->lock.
1013 */
1014static void get_pwq(struct pool_workqueue *pwq)
1015{
1016        lockdep_assert_held(&pwq->pool->lock);
1017        WARN_ON_ONCE(pwq->refcnt <= 0);
1018        pwq->refcnt++;
1019}
1020
1021/**
1022 * put_pwq - put a pool_workqueue reference
1023 * @pwq: pool_workqueue to put
1024 *
1025 * Drop a reference of @pwq.  If its refcnt reaches zero, schedule its
1026 * destruction.  The caller should be holding the matching pool->lock.
1027 */
1028static void put_pwq(struct pool_workqueue *pwq)
1029{
1030        lockdep_assert_held(&pwq->pool->lock);
1031        if (likely(--pwq->refcnt))
1032                return;
1033        if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND)))
1034                return;
1035        /*
1036         * @pwq can't be released under pool->lock, bounce to
1037         * pwq_unbound_release_workfn().  This never recurses on the same
1038         * pool->lock as this path is taken only for unbound workqueues and
1039         * the release work item is scheduled on a per-cpu workqueue.  To
1040         * avoid lockdep warning, unbound pool->locks are given lockdep
1041         * subclass of 1 in get_unbound_pool().
1042         */
1043        schedule_work(&pwq->unbound_release_work);
1044}
1045
1046/**
1047 * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
1048 * @pwq: pool_workqueue to put (can be %NULL)
1049 *
1050 * put_pwq() with locking.  This function also allows %NULL @pwq.
1051 */
1052static void put_pwq_unlocked(struct pool_workqueue *pwq)
1053{
1054        if (pwq) {
1055                /*
1056                 * As both pwqs and pools are sched-RCU protected, the
1057                 * following lock operations are safe.
1058                 */
1059                spin_lock_irq(&pwq->pool->lock);
1060                put_pwq(pwq);
1061                spin_unlock_irq(&pwq->pool->lock);
1062        }
1063}
1064
1065static void pwq_activate_delayed_work(struct work_struct *work)
1066{
1067        struct pool_workqueue *pwq = get_work_pwq(work);
1068
1069        trace_workqueue_activate_work(work);
1070        move_linked_works(work, &pwq->pool->worklist, NULL);
1071        __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
1072        pwq->nr_active++;
1073}
1074
1075static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
1076{
1077        struct work_struct *work = list_first_entry(&pwq->delayed_works,
1078                                                    struct work_struct, entry);
1079
1080        pwq_activate_delayed_work(work);
1081}
1082
1083/**
1084 * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
1085 * @pwq: pwq of interest
1086 * @color: color of work which left the queue
1087 *
1088 * A work either has completed or is removed from pending queue,
1089 * decrement nr_in_flight of its pwq and handle workqueue flushing.
1090 *
1091 * CONTEXT:
1092 * spin_lock_irq(pool->lock).
1093 */
1094static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
1095{
1096        /* uncolored work items don't participate in flushing or nr_active */
1097        if (color == WORK_NO_COLOR)
1098                goto out_put;
1099
1100        pwq->nr_in_flight[color]--;
1101
1102        pwq->nr_active--;
1103        if (!list_empty(&pwq->delayed_works)) {
1104                /* one down, submit a delayed one */
1105                if (pwq->nr_active < pwq->max_active)
1106                        pwq_activate_first_delayed(pwq);
1107        }
1108
1109        /* is flush in progress and are we at the flushing tip? */
1110        if (likely(pwq->flush_color != color))
1111                goto out_put;
1112
1113        /* are there still in-flight works? */
1114        if (pwq->nr_in_flight[color])
1115                goto out_put;
1116
1117        /* this pwq is done, clear flush_color */
1118        pwq->flush_color = -1;
1119
1120        /*
1121         * If this was the last pwq, wake up the first flusher.  It
1122         * will handle the rest.
1123         */
1124        if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
1125                complete(&pwq->wq->first_flusher->done);
1126out_put:
1127        put_pwq(pwq);
1128}
1129
1130/**
1131 * try_to_grab_pending - steal work item from worklist and disable irq
1132 * @work: work item to steal
1133 * @is_dwork: @work is a delayed_work
1134 * @flags: place to store irq state
1135 *
1136 * Try to grab PENDING bit of @work.  This function can handle @work in any
1137 * stable state - idle, on timer or on worklist.
1138 *
1139 * Return:
1140 *  1           if @work was pending and we successfully stole PENDING
1141 *  0           if @work was idle and we claimed PENDING
1142 *  -EAGAIN     if PENDING couldn't be grabbed at the moment, safe to busy-retry
1143 *  -ENOENT     if someone else is canceling @work, this state may persist
1144 *              for arbitrarily long
1145 *
1146 * Note:
1147 * On >= 0 return, the caller owns @work's PENDING bit.  To avoid getting
1148 * interrupted while holding PENDING and @work off queue, irq must be
1149 * disabled on entry.  This, combined with delayed_work->timer being
 1150 * irqsafe, ensures that we return -EAGAIN for only a finite, short period of time.
1151 *
1152 * On successful return, >= 0, irq is disabled and the caller is
1153 * responsible for releasing it using local_irq_restore(*@flags).
1154 *
1155 * This function is safe to call from any context including IRQ handler.
1156 */
1157static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
1158                               unsigned long *flags)
1159{
1160        struct worker_pool *pool;
1161        struct pool_workqueue *pwq;
1162
1163        local_irq_save(*flags);
1164
1165        /* try to steal the timer if it exists */
1166        if (is_dwork) {
1167                struct delayed_work *dwork = to_delayed_work(work);
1168
1169                /*
1170                 * dwork->timer is irqsafe.  If del_timer() fails, it's
1171                 * guaranteed that the timer is not queued anywhere and not
1172                 * running on the local CPU.
1173                 */
1174                if (likely(del_timer(&dwork->timer)))
1175                        return 1;
1176        }
1177
1178        /* try to claim PENDING the normal way */
1179        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
1180                return 0;
1181
1182        /*
1183         * The queueing is in progress, or it is already queued. Try to
1184         * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
1185         */
1186        pool = get_work_pool(work);
1187        if (!pool)
1188                goto fail;
1189
1190        spin_lock(&pool->lock);
1191        /*
1192         * work->data is guaranteed to point to pwq only while the work
1193         * item is queued on pwq->wq, and both updating work->data to point
1194         * to pwq on queueing and to pool on dequeueing are done under
1195         * pwq->pool->lock.  This in turn guarantees that, if work->data
1196         * points to pwq which is associated with a locked pool, the work
1197         * item is currently queued on that pool.
1198         */
1199        pwq = get_work_pwq(work);
1200        if (pwq && pwq->pool == pool) {
1201                debug_work_deactivate(work);
1202
1203                /*
1204                 * A delayed work item cannot be grabbed directly because
1205                 * it might have linked NO_COLOR work items which, if left
1206                 * on the delayed_list, will confuse pwq->nr_active
1207                 * management later on and cause stall.  Make sure the work
1208                 * item is activated before grabbing.
1209                 */
1210                if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
1211                        pwq_activate_delayed_work(work);
1212
1213                list_del_init(&work->entry);
1214                pwq_dec_nr_in_flight(pwq, get_work_color(work));
1215
1216                /* work->data points to pwq iff queued, point to pool */
1217                set_work_pool_and_keep_pending(work, pool->id);
1218
1219                spin_unlock(&pool->lock);
1220                return 1;
1221        }
1222        spin_unlock(&pool->lock);
1223fail:
1224        local_irq_restore(*flags);
1225        if (work_is_canceling(work))
1226                return -ENOENT;
1227        cpu_relax();
1228        return -EAGAIN;
1229}
1230
1231/**
1232 * insert_work - insert a work into a pool
1233 * @pwq: pwq @work belongs to
1234 * @work: work to insert
1235 * @head: insertion point
1236 * @extra_flags: extra WORK_STRUCT_* flags to set
1237 *
1238 * Insert @work which belongs to @pwq after @head.  @extra_flags is or'd to
1239 * work_struct flags.
1240 *
1241 * CONTEXT:
1242 * spin_lock_irq(pool->lock).
1243 */
1244static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
1245                        struct list_head *head, unsigned int extra_flags)
1246{
1247        struct worker_pool *pool = pwq->pool;
1248
1249        /* we own @work, set data and link */
1250        set_work_pwq(work, pwq, extra_flags);
1251        list_add_tail(&work->entry, head);
1252        get_pwq(pwq);
1253
1254        /*
1255         * Ensure either wq_worker_sleeping() sees the above
1256         * list_add_tail() or we see zero nr_running to avoid workers lying
1257         * around lazily while there are works to be processed.
1258         */
1259        smp_mb();
1260
1261        if (__need_more_worker(pool))
1262                wake_up_worker(pool);
1263}
1264
1265/*
1266 * Test whether @work is being queued from another work executing on the
1267 * same workqueue.
1268 */
1269static bool is_chained_work(struct workqueue_struct *wq)
1270{
1271        struct worker *worker;
1272
1273        worker = current_wq_worker();
1274        /*
 1275         * Return %true iff I'm a worker executing a work item on @wq.  If
1276         * I'm @worker, it's safe to dereference it without locking.
1277         */
1278        return worker && worker->current_pwq->wq == wq;
1279}
1280
1281static void __queue_work(int cpu, struct workqueue_struct *wq,
1282                         struct work_struct *work)
1283{
1284        struct pool_workqueue *pwq;
1285        struct worker_pool *last_pool;
1286        struct list_head *worklist;
1287        unsigned int work_flags;
1288        unsigned int req_cpu = cpu;
1289
1290        /*
1291         * While a work item is PENDING && off queue, a task trying to
1292         * steal the PENDING will busy-loop waiting for it to either get
1293         * queued or lose PENDING.  Grabbing PENDING and queueing should
1294         * happen with IRQ disabled.
1295         */
1296        WARN_ON_ONCE(!irqs_disabled());
1297
1298        debug_work_activate(work);
1299
1300        /* if draining, only works from the same workqueue are allowed */
1301        if (unlikely(wq->flags & __WQ_DRAINING) &&
1302            WARN_ON_ONCE(!is_chained_work(wq)))
1303                return;
1304retry:
1305        if (req_cpu == WORK_CPU_UNBOUND)
1306                cpu = raw_smp_processor_id();
1307
1308        /* pwq which will be used unless @work is executing elsewhere */
1309        if (!(wq->flags & WQ_UNBOUND))
1310                pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
1311        else
1312                pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
1313
1314        /*
1315         * If @work was previously on a different pool, it might still be
1316         * running there, in which case the work needs to be queued on that
1317         * pool to guarantee non-reentrancy.
1318         */
1319        last_pool = get_work_pool(work);
1320        if (last_pool && last_pool != pwq->pool) {
1321                struct worker *worker;
1322
1323                spin_lock(&last_pool->lock);
1324
1325                worker = find_worker_executing_work(last_pool, work);
1326
1327                if (worker && worker->current_pwq->wq == wq) {
1328                        pwq = worker->current_pwq;
1329                } else {
1330                        /* meh... not running there, queue here */
1331                        spin_unlock(&last_pool->lock);
1332                        spin_lock(&pwq->pool->lock);
1333                }
1334        } else {
1335                spin_lock(&pwq->pool->lock);
1336        }
1337
1338        /*
1339         * pwq is determined and locked.  For unbound pools, we could have
1340         * raced with pwq release and it could already be dead.  If its
1341         * refcnt is zero, repeat pwq selection.  Note that pwqs never die
1342         * without another pwq replacing it in the numa_pwq_tbl or while
1343         * work items are executing on it, so the retrying is guaranteed to
1344         * make forward-progress.
1345         */
1346        if (unlikely(!pwq->refcnt)) {
1347                if (wq->flags & WQ_UNBOUND) {
1348                        spin_unlock(&pwq->pool->lock);
1349                        cpu_relax();
1350                        goto retry;
1351                }
1352                /* oops */
1353                WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
1354                          wq->name, cpu);
1355        }
1356
1357        /* pwq determined, queue */
1358        trace_workqueue_queue_work(req_cpu, pwq, work);
1359
1360        if (WARN_ON(!list_empty(&work->entry))) {
1361                spin_unlock(&pwq->pool->lock);
1362                return;
1363        }
1364
1365        pwq->nr_in_flight[pwq->work_color]++;
1366        work_flags = work_color_to_flags(pwq->work_color);
1367
1368        if (likely(pwq->nr_active < pwq->max_active)) {
1369                trace_workqueue_activate_work(work);
1370                pwq->nr_active++;
1371                worklist = &pwq->pool->worklist;
1372        } else {
1373                work_flags |= WORK_STRUCT_DELAYED;
1374                worklist = &pwq->delayed_works;
1375        }
1376
1377        insert_work(pwq, work, worklist, work_flags);
1378
1379        spin_unlock(&pwq->pool->lock);
1380}
1381
1382/**
1383 * queue_work_on - queue work on specific cpu
1384 * @cpu: CPU number to execute work on
1385 * @wq: workqueue to use
1386 * @work: work to queue
1387 *
 1388 * We queue the work to a specific CPU; the caller must ensure it
1389 * can't go away.
1390 *
1391 * Return: %false if @work was already on a queue, %true otherwise.
1392 */
1393bool queue_work_on(int cpu, struct workqueue_struct *wq,
1394                   struct work_struct *work)
1395{
1396        bool ret = false;
1397        unsigned long flags;
1398
1399        local_irq_save(flags);
1400
1401        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1402                __queue_work(cpu, wq, work);
1403                ret = true;
1404        }
1405
1406        local_irq_restore(flags);
1407        return ret;
1408}
1409EXPORT_SYMBOL(queue_work_on);
1410
1411void delayed_work_timer_fn(unsigned long __data)
1412{
1413        struct delayed_work *dwork = (struct delayed_work *)__data;
1414
1415        /* should have been called from irqsafe timer with irq already off */
1416        __queue_work(dwork->cpu, dwork->wq, &dwork->work);
1417}
1418EXPORT_SYMBOL(delayed_work_timer_fn);
1419
1420static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
1421                                struct delayed_work *dwork, unsigned long delay)
1422{
1423        struct timer_list *timer = &dwork->timer;
1424        struct work_struct *work = &dwork->work;
1425
1426        WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
1427                     timer->data != (unsigned long)dwork);
1428        WARN_ON_ONCE(timer_pending(timer));
1429        WARN_ON_ONCE(!list_empty(&work->entry));
1430
1431        /*
1432         * If @delay is 0, queue @dwork->work immediately.  This is for
1433         * both optimization and correctness.  The earliest @timer can
1434         * expire is on the closest next tick and delayed_work users depend
 1435         * on there being no such delay when @delay is 0.
1436         */
1437        if (!delay) {
1438                __queue_work(cpu, wq, &dwork->work);
1439                return;
1440        }
1441
1442        timer_stats_timer_set_start_info(&dwork->timer);
1443
1444        dwork->wq = wq;
1445        dwork->cpu = cpu;
1446        timer->expires = jiffies + delay;
1447
1448        if (unlikely(cpu != WORK_CPU_UNBOUND))
1449                add_timer_on(timer, cpu);
1450        else
1451                add_timer(timer);
1452}
1453
1454/**
1455 * queue_delayed_work_on - queue work on specific CPU after delay
1456 * @cpu: CPU number to execute work on
1457 * @wq: workqueue to use
1458 * @dwork: work to queue
1459 * @delay: number of jiffies to wait before queueing
1460 *
1461 * Return: %false if @dwork was already on a queue, %true otherwise.  If
1462 * @delay is zero and @dwork is idle, it will be scheduled for immediate
1463 * execution.
1464 */
1465bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
1466                           struct delayed_work *dwork, unsigned long delay)
1467{
1468        struct work_struct *work = &dwork->work;
1469        bool ret = false;
1470        unsigned long flags;
1471
1472        /* read the comment in __queue_work() */
1473        local_irq_save(flags);
1474
1475        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1476                __queue_delayed_work(cpu, wq, dwork, delay);
1477                ret = true;
1478        }
1479
1480        local_irq_restore(flags);
1481        return ret;
1482}
1483EXPORT_SYMBOL(queue_delayed_work_on);
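
/*
 * Usage sketch (not part of workqueue.c): a self-rearming poller built on
 * queue_delayed_work_on().  The "wq_example_poll*" names are hypothetical.
 * Passing WORK_CPU_UNBOUND lets the workqueue pick the CPU, which is what
 * the queue_delayed_work() wrapper does.
 */
static void wq_example_poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(wq_example_poll, wq_example_poll_fn);

static void wq_example_poll_fn(struct work_struct *work)
{
        /* ... poll the hardware ..., then re-arm one second out */
        queue_delayed_work_on(WORK_CPU_UNBOUND, system_wq,
                              &wq_example_poll, HZ);
}

static void wq_example_poll_start(void)
{
        /* @delay == 0 queues immediately, see __queue_delayed_work() */
        queue_delayed_work_on(WORK_CPU_UNBOUND, system_wq,
                              &wq_example_poll, 0);
}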
1484
1485/**
1486 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
1487 * @cpu: CPU number to execute work on
1488 * @wq: workqueue to use
1489 * @dwork: work to queue
1490 * @delay: number of jiffies to wait before queueing
1491 *
1492 * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
1493 * modify @dwork's timer so that it expires after @delay.  If @delay is
1494 * zero, @dwork is guaranteed to be scheduled immediately regardless of its
1495 * current state.
1496 *
1497 * Return: %false if @dwork was idle and queued, %true if @dwork was
1498 * pending and its timer was modified.
1499 *
1500 * This function is safe to call from any context including IRQ handler.
1501 * See try_to_grab_pending() for details.
1502 */
1503bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
1504                         struct delayed_work *dwork, unsigned long delay)
1505{
1506        unsigned long flags;
1507        int ret;
1508
1509        do {
1510                ret = try_to_grab_pending(&dwork->work, true, &flags);
1511        } while (unlikely(ret == -EAGAIN));
1512
1513        if (likely(ret >= 0)) {
1514                __queue_delayed_work(cpu, wq, dwork, delay);
1515                local_irq_restore(flags);
1516        }
1517
1518        /* -ENOENT from try_to_grab_pending() becomes %true */
1519        return ret;
1520}
1521EXPORT_SYMBOL_GPL(mod_delayed_work_on);
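
/*
 * Usage sketch (not part of workqueue.c): debouncing with
 * mod_delayed_work_on().  Every call pushes the timeout out again, so the
 * handler only runs once events have been quiet for 100ms.  The
 * "wq_example_debounce*" names are hypothetical.
 */
static void wq_example_debounce_fn(struct work_struct *work)
{
        pr_info("input has been idle for 100ms\n");
}

static DECLARE_DELAYED_WORK(wq_example_debounce, wq_example_debounce_fn);

static void wq_example_input_event(void)
{
        /* safe from IRQ context; whether idle or pending, the timer is (re)armed */
        mod_delayed_work_on(WORK_CPU_UNBOUND, system_wq,
                            &wq_example_debounce, msecs_to_jiffies(100));
}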
1522
1523/**
1524 * worker_enter_idle - enter idle state
1525 * @worker: worker which is entering idle state
1526 *
1527 * @worker is entering idle state.  Update stats and idle timer if
1528 * necessary.
1529 *
1530 * LOCKING:
1531 * spin_lock_irq(pool->lock).
1532 */
1533static void worker_enter_idle(struct worker *worker)
1534{
1535        struct worker_pool *pool = worker->pool;
1536
1537        if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
1538            WARN_ON_ONCE(!list_empty(&worker->entry) &&
1539                         (worker->hentry.next || worker->hentry.pprev)))
1540                return;
1541
1542        /* can't use worker_set_flags(), also called from create_worker() */
1543        worker->flags |= WORKER_IDLE;
1544        pool->nr_idle++;
1545        worker->last_active = jiffies;
1546
1547        /* idle_list is LIFO */
1548        list_add(&worker->entry, &pool->idle_list);
1549
1550        if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
1551                mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
1552
1553        /*
1554         * Sanity check nr_running.  Because wq_unbind_fn() releases
1555         * pool->lock between setting %WORKER_UNBOUND and zapping
1556         * nr_running, the warning may trigger spuriously.  Check iff
1557         * unbind is not in progress.
1558         */
1559        WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
1560                     pool->nr_workers == pool->nr_idle &&
1561                     atomic_read(&pool->nr_running));
1562}
1563
1564/**
1565 * worker_leave_idle - leave idle state
1566 * @worker: worker which is leaving idle state
1567 *
1568 * @worker is leaving idle state.  Update stats.
1569 *
1570 * LOCKING:
1571 * spin_lock_irq(pool->lock).
1572 */
1573static void worker_leave_idle(struct worker *worker)
1574{
1575        struct worker_pool *pool = worker->pool;
1576
1577        if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
1578                return;
1579        worker_clr_flags(worker, WORKER_IDLE);
1580        pool->nr_idle--;
1581        list_del_init(&worker->entry);
1582}
1583
1584static struct worker *alloc_worker(int node)
1585{
1586        struct worker *worker;
1587
1588        worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
1589        if (worker) {
1590                INIT_LIST_HEAD(&worker->entry);
1591                INIT_LIST_HEAD(&worker->scheduled);
1592                INIT_LIST_HEAD(&worker->node);
1593                /* on creation a worker is in !idle && prep state */
1594                worker->flags = WORKER_PREP;
1595        }
1596        return worker;
1597}
1598
1599/**
1600 * worker_attach_to_pool() - attach a worker to a pool
1601 * @worker: worker to be attached
1602 * @pool: the target pool
1603 *
1604 * Attach @worker to @pool.  Once attached, the %WORKER_UNBOUND flag and
1605 * cpu-binding of @worker are kept coordinated with the pool across
1606 * cpu-[un]hotplugs.
1607 */
1608static void worker_attach_to_pool(struct worker *worker,
1609                                   struct worker_pool *pool)
1610{
1611        mutex_lock(&pool->attach_mutex);
1612
1613        /*
1614         * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
1615         * online CPUs.  It'll be re-applied when any of the CPUs come up.
1616         */
1617        set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
1618
1619        /*
1620         * The pool->attach_mutex ensures %POOL_DISASSOCIATED remains
1621         * stable across this function.  See the comments above the
1622         * flag definition for details.
1623         */
1624        if (pool->flags & POOL_DISASSOCIATED)
1625                worker->flags |= WORKER_UNBOUND;
1626
1627        list_add_tail(&worker->node, &pool->workers);
1628
1629        mutex_unlock(&pool->attach_mutex);
1630}
1631
1632/**
1633 * worker_detach_from_pool() - detach a worker from its pool
1634 * @worker: worker which is attached to its pool
1635 * @pool: the pool @worker is attached to
1636 *
1637 * Undo the attaching which had been done in worker_attach_to_pool().  The
1638 * caller worker shouldn't access the pool after it is detached unless it
1639 * holds another reference to the pool.
1640 */
1641static void worker_detach_from_pool(struct worker *worker,
1642                                    struct worker_pool *pool)
1643{
1644        struct completion *detach_completion = NULL;
1645
1646        mutex_lock(&pool->attach_mutex);
1647        list_del(&worker->node);
1648        if (list_empty(&pool->workers))
1649                detach_completion = pool->detach_completion;
1650        mutex_unlock(&pool->attach_mutex);
1651
1652        /* clear leftover flags without pool->lock after it is detached */
1653        worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
1654
1655        if (detach_completion)
1656                complete(detach_completion);
1657}
1658
1659/**
1660 * create_worker - create a new workqueue worker
1661 * @pool: pool the new worker will belong to
1662 *
1663 * Create and start a new worker which is attached to @pool.
1664 *
1665 * CONTEXT:
1666 * Might sleep.  Does GFP_KERNEL allocations.
1667 *
1668 * Return:
1669 * Pointer to the newly created worker.
1670 */
1671static struct worker *create_worker(struct worker_pool *pool)
1672{
1673        struct worker *worker = NULL;
1674        int id = -1;
1675        char id_buf[16];
1676
1677        /* ID is needed to determine kthread name */
1678        id = ida_simple_get(&pool->worker_ida, 0, 0, GFP_KERNEL);
1679        if (id < 0)
1680                goto fail;
1681
1682        worker = alloc_worker(pool->node);
1683        if (!worker)
1684                goto fail;
1685
1686        worker->pool = pool;
1687        worker->id = id;
1688
1689        if (pool->cpu >= 0)
1690                snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
1691                         pool->attrs->nice < 0  ? "H" : "");
1692        else
1693                snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
1694
1695        worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
1696                                              "kworker/%s", id_buf);
1697        if (IS_ERR(worker->task))
1698                goto fail;
1699
1700        set_user_nice(worker->task, pool->attrs->nice);
1701
1702        /* prevent userland from meddling with cpumask of workqueue workers */
1703        worker->task->flags |= PF_NO_SETAFFINITY;
1704
1705        /* successful, attach the worker to the pool */
1706        worker_attach_to_pool(worker, pool);
1707
1708        /* start the newly created worker */
1709        spin_lock_irq(&pool->lock);
1710        worker->pool->nr_workers++;
1711        worker_enter_idle(worker);
1712        wake_up_process(worker->task);
1713        spin_unlock_irq(&pool->lock);
1714
1715        return worker;
1716
1717fail:
1718        if (id >= 0)
1719                ida_simple_remove(&pool->worker_ida, id);
1720        kfree(worker);
1721        return NULL;
1722}
1723
1724/**
1725 * destroy_worker - destroy a workqueue worker
1726 * @worker: worker to be destroyed
1727 *
1728 * Destroy @worker and adjust @pool stats accordingly.  The worker should
1729 * be idle.
1730 *
1731 * CONTEXT:
1732 * spin_lock_irq(pool->lock).
1733 */
1734static void destroy_worker(struct worker *worker)
1735{
1736        struct worker_pool *pool = worker->pool;
1737
1738        lockdep_assert_held(&pool->lock);
1739
1740        /* sanity check frenzy */
1741        if (WARN_ON(worker->current_work) ||
1742            WARN_ON(!list_empty(&worker->scheduled)) ||
1743            WARN_ON(!(worker->flags & WORKER_IDLE)))
1744                return;
1745
1746        pool->nr_workers--;
1747        pool->nr_idle--;
1748
1749        list_del_init(&worker->entry);
1750        worker->flags |= WORKER_DIE;
1751        wake_up_process(worker->task);
1752}
1753
1754static void idle_worker_timeout(unsigned long __pool)
1755{
1756        struct worker_pool *pool = (void *)__pool;
1757
1758        spin_lock_irq(&pool->lock);
1759
1760        while (too_many_workers(pool)) {
1761                struct worker *worker;
1762                unsigned long expires;
1763
1764                /* idle_list is kept in LIFO order, check the last one */
1765                worker = list_entry(pool->idle_list.prev, struct worker, entry);
1766                expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1767
1768                if (time_before(jiffies, expires)) {
1769                        mod_timer(&pool->idle_timer, expires);
1770                        break;
1771                }
1772
1773                destroy_worker(worker);
1774        }
1775
1776        spin_unlock_irq(&pool->lock);
1777}
1778
1779static void send_mayday(struct work_struct *work)
1780{
1781        struct pool_workqueue *pwq = get_work_pwq(work);
1782        struct workqueue_struct *wq = pwq->wq;
1783
1784        lockdep_assert_held(&wq_mayday_lock);
1785
1786        if (!wq->rescuer)
1787                return;
1788
1789        /* mayday mayday mayday */
1790        if (list_empty(&pwq->mayday_node)) {
1791                /*
1792                 * If @pwq is for an unbound wq, its base ref may be put at
1793                 * any time due to an attribute change.  Pin @pwq until the
1794                 * rescuer is done with it.
1795                 */
1796                get_pwq(pwq);
1797                list_add_tail(&pwq->mayday_node, &wq->maydays);
1798                wake_up_process(wq->rescuer->task);
1799        }
1800}
1801
1802static void pool_mayday_timeout(unsigned long __pool)
1803{
1804        struct worker_pool *pool = (void *)__pool;
1805        struct work_struct *work;
1806
1807        spin_lock_irq(&pool->lock);
1808        spin_lock(&wq_mayday_lock);             /* for wq->maydays */
1809
1810        if (need_to_create_worker(pool)) {
1811                /*
1812                 * We've been trying to create a new worker but
1813                 * haven't been successful.  We might be hitting an
1814                 * allocation deadlock.  Send distress signals to
1815                 * rescuers.
1816                 */
1817                list_for_each_entry(work, &pool->worklist, entry)
1818                        send_mayday(work);
1819        }
1820
1821        spin_unlock(&wq_mayday_lock);
1822        spin_unlock_irq(&pool->lock);
1823
1824        mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
1825}
1826
1827/**
1828 * maybe_create_worker - create a new worker if necessary
1829 * @pool: pool to create a new worker for
1830 *
1831 * Create a new worker for @pool if necessary.  @pool is guaranteed to
1832 * have at least one idle worker on return from this function.  If
1833 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
1834 * sent to all rescuers with works scheduled on @pool to resolve
1835 * possible allocation deadlock.
1836 *
1837 * On return, need_to_create_worker() is guaranteed to be %false and
1838 * may_start_working() %true.
1839 *
1840 * LOCKING:
1841 * spin_lock_irq(pool->lock) which may be released and regrabbed
1842 * multiple times.  Does GFP_KERNEL allocations.  Called only from
1843 * manager.
1844 */
1845static void maybe_create_worker(struct worker_pool *pool)
1846__releases(&pool->lock)
1847__acquires(&pool->lock)
1848{
1849restart:
1850        spin_unlock_irq(&pool->lock);
1851
1852        /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
1853        mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
1854
1855        while (true) {
1856                if (create_worker(pool) || !need_to_create_worker(pool))
1857                        break;
1858
1859                schedule_timeout_interruptible(CREATE_COOLDOWN);
1860
1861                if (!need_to_create_worker(pool))
1862                        break;
1863        }
1864
1865        del_timer_sync(&pool->mayday_timer);
1866        spin_lock_irq(&pool->lock);
1867        /*
1868         * This is necessary even after a new worker was just successfully
1869         * created as @pool->lock was dropped and the new worker might have
1870         * already become busy.
1871         */
1872        if (need_to_create_worker(pool))
1873                goto restart;
1874}
1875
1876/**
1877 * manage_workers - manage worker pool
1878 * @worker: self
1879 *
1880 * Assume the manager role and manage the worker pool @worker belongs
1881 * to.  At any given time, there can be only zero or one manager per
1882 * pool.  The exclusion is handled automatically by this function.
1883 *
1884 * The caller can safely start processing works on false return.  On
1885 * true return, it's guaranteed that need_to_create_worker() is false
1886 * and may_start_working() is true.
1887 *
1888 * CONTEXT:
1889 * spin_lock_irq(pool->lock) which may be released and regrabbed
1890 * multiple times.  Does GFP_KERNEL allocations.
1891 *
1892 * Return:
1893 * %false if the pool doesn't need management and the caller can safely
1894 * start processing works, %true if management function was performed and
1895 * the conditions that the caller verified before calling the function may
1896 * no longer be true.
1897 */
1898static bool manage_workers(struct worker *worker)
1899{
1900        struct worker_pool *pool = worker->pool;
1901
1902        /*
1903         * Anyone who successfully grabs manager_arb wins the arbitration
1904         * and becomes the manager.  mutex_trylock() on pool->manager_arb
1905         * failure while holding pool->lock reliably indicates that someone
1906         * else is managing the pool and the worker which failed trylock
1907         * can proceed to executing work items.  This means that anyone
1908         * grabbing manager_arb is responsible for actually performing
1909         * manager duties.  If manager_arb is grabbed and released without
1910         * actual management, the pool may stall indefinitely.
1911         */
1912        if (!mutex_trylock(&pool->manager_arb))
1913                return false;
1914
1915        maybe_create_worker(pool);
1916
1917        mutex_unlock(&pool->manager_arb);
1918        return true;
1919}
1920
1921/**
1922 * process_one_work - process single work
1923 * @worker: self
1924 * @work: work to process
1925 *
1926 * Process @work.  This function contains all the logic necessary to
1927 * process a single work item, including synchronization against and
1928 * interaction with other workers on the same cpu, queueing and
1929 * flushing.  As long as the context requirement is met, any worker can
1930 * call this function to process a work item.
1931 *
1932 * CONTEXT:
1933 * spin_lock_irq(pool->lock) which is released and regrabbed.
1934 */
1935static void process_one_work(struct worker *worker, struct work_struct *work)
1936__releases(&pool->lock)
1937__acquires(&pool->lock)
1938{
1939        struct pool_workqueue *pwq = get_work_pwq(work);
1940        struct worker_pool *pool = worker->pool;
1941        bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
1942        int work_color;
1943        struct worker *collision;
1944#ifdef CONFIG_LOCKDEP
1945        /*
1946         * It is permissible to free the struct work_struct from
1947         * inside the function that is called from it, this we need to
1948         * take into account for lockdep too.  To avoid bogus "held
1949         * lock freed" warnings as well as problems when looking into
1950         * work->lockdep_map, make a copy and use that here.
1951         */
1952        struct lockdep_map lockdep_map;
1953
1954        lockdep_copy_map(&lockdep_map, &work->lockdep_map);
1955#endif
1956        /* ensure we're on the correct CPU */
1957        WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
1958                     raw_smp_processor_id() != pool->cpu);
1959
1960        /*
1961         * A single work shouldn't be executed concurrently by
1962         * multiple workers on a single cpu.  Check whether anyone is
1963         * already processing the work.  If so, defer the work to the
1964         * currently executing one.
1965         */
1966        collision = find_worker_executing_work(pool, work);
1967        if (unlikely(collision)) {
1968                move_linked_works(work, &collision->scheduled, NULL);
1969                return;
1970        }
1971
1972        /* claim and dequeue */
1973        debug_work_deactivate(work);
1974        hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
1975        worker->current_work = work;
1976        worker->current_func = work->func;
1977        worker->current_pwq = pwq;
1978        work_color = get_work_color(work);
1979
1980        list_del_init(&work->entry);
1981
1982        /*
1983         * CPU intensive works don't participate in concurrency management.
1984         * They're the scheduler's responsibility.  This takes @worker out
1985         * of concurrency management and the next code block will chain
1986         * execution of the pending work items.
1987         */
1988        if (unlikely(cpu_intensive))
1989                worker_set_flags(worker, WORKER_CPU_INTENSIVE);
1990
1991        /*
1992         * Wake up another worker if necessary.  The condition is always
1993         * false for normal per-cpu workers since nr_running would always
1994         * be >= 1 at this point.  This is used to chain execution of the
1995         * pending work items for WORKER_NOT_RUNNING workers such as the
1996         * UNBOUND and CPU_INTENSIVE ones.
1997         */
1998        if (need_more_worker(pool))
1999                wake_up_worker(pool);
2000
2001        /*
2002         * Record the last pool and clear PENDING which should be the last
2003         * update to @work.  Also, do this inside @pool->lock so that
2004         * PENDING and queued state changes happen together while IRQ is
2005         * disabled.
2006         */
2007        set_work_pool_and_clear_pending(work, pool->id);
2008
2009        spin_unlock_irq(&pool->lock);
2010
2011        lock_map_acquire_read(&pwq->wq->lockdep_map);
2012        lock_map_acquire(&lockdep_map);
2013        trace_workqueue_execute_start(work);
2014        worker->current_func(work);
2015        /*
2016         * While we must be careful to not use "work" after this, the trace
2017         * point will only record its address.
2018         */
2019        trace_workqueue_execute_end(work);
2020        lock_map_release(&lockdep_map);
2021        lock_map_release(&pwq->wq->lockdep_map);
2022
2023        if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
2024                pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
2025                       "     last function: %pf\n",
2026                       current->comm, preempt_count(), task_pid_nr(current),
2027                       worker->current_func);
2028                debug_show_held_locks(current);
2029                dump_stack();
2030        }
2031
2032        /*
2033         * The following prevents a kworker from hogging CPU on !PREEMPT
2034         * kernels, where a requeueing work item waiting for something to
2035         * happen could deadlock with stop_machine as such work item could
2036         * indefinitely requeue itself while all other CPUs are trapped in
2037         * stop_machine. At the same time, report a quiescent RCU state so
2038         * the same condition doesn't freeze RCU.
2039         */
2040        cond_resched_rcu_qs();
2041
2042        spin_lock_irq(&pool->lock);
2043
2044        /* clear cpu intensive status */
2045        if (unlikely(cpu_intensive))
2046                worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
2047
2048        /* we're done with it, release */
2049        hash_del(&worker->hentry);
2050        worker->current_work = NULL;
2051        worker->current_func = NULL;
2052        worker->current_pwq = NULL;
2053        worker->desc_valid = false;
2054        pwq_dec_nr_in_flight(pwq, work_color);
2055}
2056
2057/**
2058 * process_scheduled_works - process scheduled works
2059 * @worker: self
2060 *
2061 * Process all scheduled works.  Please note that the scheduled list
2062 * may change while processing a work, so this function repeatedly
2063 * fetches a work from the top and executes it.
2064 *
2065 * CONTEXT:
2066 * spin_lock_irq(pool->lock) which may be released and regrabbed
2067 * multiple times.
2068 */
2069static void process_scheduled_works(struct worker *worker)
2070{
2071        while (!list_empty(&worker->scheduled)) {
2072                struct work_struct *work = list_first_entry(&worker->scheduled,
2073                                                struct work_struct, entry);
2074                process_one_work(worker, work);
2075        }
2076}
2077
2078/**
2079 * worker_thread - the worker thread function
2080 * @__worker: self
2081 *
2082 * The worker thread function.  All workers belong to a worker_pool -
2083 * either a per-cpu one or dynamic unbound one.  These workers process all
2084 * work items regardless of their specific target workqueue.  The only
2085 * exception is work items which belong to workqueues with a rescuer which
2086 * will be explained in rescuer_thread().
2087 *
2088 * Return: 0
2089 */
2090static int worker_thread(void *__worker)
2091{
2092        struct worker *worker = __worker;
2093        struct worker_pool *pool = worker->pool;
2094
2095        /* tell the scheduler that this is a workqueue worker */
2096        worker->task->flags |= PF_WQ_WORKER;
2097woke_up:
2098        spin_lock_irq(&pool->lock);
2099
2100        /* am I supposed to die? */
2101        if (unlikely(worker->flags & WORKER_DIE)) {
2102                spin_unlock_irq(&pool->lock);
2103                WARN_ON_ONCE(!list_empty(&worker->entry));
2104                worker->task->flags &= ~PF_WQ_WORKER;
2105
2106                set_task_comm(worker->task, "kworker/dying");
2107                ida_simple_remove(&pool->worker_ida, worker->id);
2108                worker_detach_from_pool(worker, pool);
2109                kfree(worker);
2110                return 0;
2111        }
2112
2113        worker_leave_idle(worker);
2114recheck:
2115        /* no more worker necessary? */
2116        if (!need_more_worker(pool))
2117                goto sleep;
2118
2119        /* do we need to manage? */
2120        if (unlikely(!may_start_working(pool)) && manage_workers(worker))
2121                goto recheck;
2122
2123        /*
2124         * ->scheduled list can only be filled while a worker is
2125         * preparing to process a work or actually processing it.
2126         * Make sure nobody diddled with it while I was sleeping.
2127         */
2128        WARN_ON_ONCE(!list_empty(&worker->scheduled));
2129
2130        /*
2131         * Finish PREP stage.  We're guaranteed to have at least one idle
2132         * worker or that someone else has already assumed the manager
2133         * role.  This is where @worker starts participating in concurrency
2134         * management if applicable and concurrency management is restored
2135         * after being rebound.  See rebind_workers() for details.
2136         */
2137        worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
2138
2139        do {
2140                struct work_struct *work =
2141                        list_first_entry(&pool->worklist,
2142                                         struct work_struct, entry);
2143
2144                if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
2145                        /* optimization path, not strictly necessary */
2146                        process_one_work(worker, work);
2147                        if (unlikely(!list_empty(&worker->scheduled)))
2148                                process_scheduled_works(worker);
2149                } else {
2150                        move_linked_works(work, &worker->scheduled, NULL);
2151                        process_scheduled_works(worker);
2152                }
2153        } while (keep_working(pool));
2154
2155        worker_set_flags(worker, WORKER_PREP);
2156sleep:
2157        /*
2158         * pool->lock is held and there's no work to process and no need to
2159         * manage, sleep.  Workers are woken up only while holding
2160         * pool->lock or from local cpu, so setting the current state
2161         * before releasing pool->lock is enough to prevent losing any
2162         * event.
2163         */
2164        worker_enter_idle(worker);
2165        __set_current_state(TASK_INTERRUPTIBLE);
2166        spin_unlock_irq(&pool->lock);
2167        schedule();
2168        goto woke_up;
2169}
2170
2171/**
2172 * rescuer_thread - the rescuer thread function
2173 * @__rescuer: self
2174 *
2175 * Workqueue rescuer thread function.  There's one rescuer for each
2176 * workqueue which has WQ_MEM_RECLAIM set.
2177 *
2178 * Regular work processing on a pool may block trying to create a new
2179 * worker, which uses a GFP_KERNEL allocation that has a slight chance of
2180 * developing into a deadlock if some works currently on the same queue
2181 * need to be processed to satisfy the GFP_KERNEL allocation.  This is
2182 * the problem the rescuer solves.
2183 *
2184 * When such a condition is possible, the pool summons the rescuers of all
2185 * workqueues which have works queued on the pool and lets them process
2186 * those works so that forward progress can be guaranteed.
2187 *
2188 * This should happen rarely.
2189 *
2190 * Return: 0
2191 */
2192static int rescuer_thread(void *__rescuer)
2193{
2194        struct worker *rescuer = __rescuer;
2195        struct workqueue_struct *wq = rescuer->rescue_wq;
2196        struct list_head *scheduled = &rescuer->scheduled;
2197        bool should_stop;
2198
2199        set_user_nice(current, RESCUER_NICE_LEVEL);
2200
2201        /*
2202         * Mark rescuer as worker too.  As WORKER_PREP is never cleared, it
2203         * doesn't participate in concurrency management.
2204         */
2205        rescuer->task->flags |= PF_WQ_WORKER;
2206repeat:
2207        set_current_state(TASK_INTERRUPTIBLE);
2208
2209        /*
2210         * By the time the rescuer is requested to stop, the workqueue
2211         * shouldn't have any work pending, but @wq->maydays may still have
2212         * pwq(s) queued.  This can happen by non-rescuer workers consuming
2213         * all the work items before the rescuer got to them.  Go through
2214         * @wq->maydays processing before acting on should_stop so that the
2215         * list is always empty on exit.
2216         */
2217        should_stop = kthread_should_stop();
2218
2219        /* see whether any pwq is asking for help */
2220        spin_lock_irq(&wq_mayday_lock);
2221
2222        while (!list_empty(&wq->maydays)) {
2223                struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
2224                                        struct pool_workqueue, mayday_node);
2225                struct worker_pool *pool = pwq->pool;
2226                struct work_struct *work, *n;
2227
2228                __set_current_state(TASK_RUNNING);
2229                list_del_init(&pwq->mayday_node);
2230
2231                spin_unlock_irq(&wq_mayday_lock);
2232
2233                worker_attach_to_pool(rescuer, pool);
2234
2235                spin_lock_irq(&pool->lock);
2236                rescuer->pool = pool;
2237
2238                /*
2239                 * Slurp in all works issued via this workqueue and
2240                 * process'em.
2241                 */
2242                WARN_ON_ONCE(!list_empty(scheduled));
2243                list_for_each_entry_safe(work, n, &pool->worklist, entry)
2244                        if (get_work_pwq(work) == pwq)
2245                                move_linked_works(work, scheduled, &n);
2246
2247                if (!list_empty(scheduled)) {
2248                        process_scheduled_works(rescuer);
2249
2250                        /*
2251                         * The above execution of rescued work items could
2252                         * have created more to rescue through
2253                         * pwq_activate_first_delayed() or chained
2254                         * queueing.  Let's put @pwq back on mayday list so
2255                         * that such back-to-back work items, which may be
2256                         * being used to relieve memory pressure, don't
2257                         * incur MAYDAY_INTERVAL delay in between.
2258                         */
2259                        if (need_to_create_worker(pool)) {
2260                                spin_lock(&wq_mayday_lock);
2261                                get_pwq(pwq);
2262                                list_move_tail(&pwq->mayday_node, &wq->maydays);
2263                                spin_unlock(&wq_mayday_lock);
2264                        }
2265                }
2266
2267                /*
2268                 * Put the reference grabbed by send_mayday().  @pool won't
2269                 * go away while we're still attached to it.
2270                 */
2271                put_pwq(pwq);
2272
2273                /*
2274                 * Leave this pool.  If need_more_worker() is %true, notify a
2275                 * regular worker; otherwise, we end up with 0 concurrency
2276                 * and stalling the execution.
2277                 */
2278                if (need_more_worker(pool))
2279                        wake_up_worker(pool);
2280
2281                rescuer->pool = NULL;
2282                spin_unlock_irq(&pool->lock);
2283
2284                worker_detach_from_pool(rescuer, pool);
2285
2286                spin_lock_irq(&wq_mayday_lock);
2287        }
2288
2289        spin_unlock_irq(&wq_mayday_lock);
2290
2291        if (should_stop) {
2292                __set_current_state(TASK_RUNNING);
2293                rescuer->task->flags &= ~PF_WQ_WORKER;
2294                return 0;
2295        }
2296
2297        /* rescuers should never participate in concurrency management */
2298        WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
2299        schedule();
2300        goto repeat;
2301}
2302
2303struct wq_barrier {
2304        struct work_struct      work;
2305        struct completion       done;
2306};
2307
2308static void wq_barrier_func(struct work_struct *work)
2309{
2310        struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
2311        complete(&barr->done);
2312}
2313
2314/**
2315 * insert_wq_barrier - insert a barrier work
2316 * @pwq: pwq to insert barrier into
2317 * @barr: wq_barrier to insert
2318 * @target: target work to attach @barr to
2319 * @worker: worker currently executing @target, NULL if @target is not executing
2320 *
2321 * @barr is linked to @target such that @barr is completed only after
2322 * @target finishes execution.  Please note that the ordering
2323 * guarantee is observed only with respect to @target and on the local
2324 * cpu.
2325 *
2326 * Currently, a queued barrier can't be canceled.  This is because
2327 * try_to_grab_pending() can't determine whether the work to be
2328 * grabbed is at the head of the queue and thus can't clear LINKED
2329 * flag of the previous work while there must be a valid next work
2330 * after a work with LINKED flag set.
2331 *
2332 * Note that when @worker is non-NULL, @target may be modified
2333 * underneath us, so we can't reliably determine pwq from @target.
2334 *
2335 * CONTEXT:
2336 * spin_lock_irq(pool->lock).
2337 */
2338static void insert_wq_barrier(struct pool_workqueue *pwq,
2339                              struct wq_barrier *barr,
2340                              struct work_struct *target, struct worker *worker)
2341{
2342        struct list_head *head;
2343        unsigned int linked = 0;
2344
2345        /*
2346         * debugobject calls are safe here even with pool->lock locked
2347         * as we know for sure that this will not trigger any of the
2348         * checks and call back into the fixup functions where we
2349         * might deadlock.
2350         */
2351        INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
2352        __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
2353        init_completion(&barr->done);
2354
2355        /*
2356         * If @target is currently being executed, schedule the
2357         * barrier to the worker; otherwise, put it after @target.
2358         */
2359        if (worker)
2360                head = worker->scheduled.next;
2361        else {
2362                unsigned long *bits = work_data_bits(target);
2363
2364                head = target->entry.next;
2365                /* there can already be other linked works, inherit and set */
2366                linked = *bits & WORK_STRUCT_LINKED;
2367                __set_bit(WORK_STRUCT_LINKED_BIT, bits);
2368        }
2369
2370        debug_work_activate(&barr->work);
2371        insert_work(pwq, &barr->work, head,
2372                    work_color_to_flags(WORK_NO_COLOR) | linked);
2373}
2374
2375/**
2376 * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
2377 * @wq: workqueue being flushed
2378 * @flush_color: new flush color, < 0 for no-op
2379 * @work_color: new work color, < 0 for no-op
2380 *
2381 * Prepare pwqs for workqueue flushing.
2382 *
2383 * If @flush_color is non-negative, flush_color on all pwqs should be
2384 * -1.  If no pwq has in-flight commands at the specified color, all
2385 * pwq->flush_color's stay at -1 and %false is returned.  If any pwq
2386 * has in flight commands, its pwq->flush_color is set to
2387 * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
2388 * wakeup logic is armed and %true is returned.
2389 *
2390 * The caller should have initialized @wq->first_flusher prior to
2391 * calling this function with non-negative @flush_color.  If
2392 * @flush_color is negative, no flush color update is done and %false
2393 * is returned.
2394 *
2395 * If @work_color is non-negative, all pwqs should have the same
2396 * work_color which is previous to @work_color and all will be
2397 * advanced to @work_color.
2398 *
2399 * CONTEXT:
2400 * mutex_lock(wq->mutex).
2401 *
2402 * Return:
2403 * %true if @flush_color >= 0 and there's something to flush.  %false
2404 * otherwise.
2405 */
2406static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
2407                                      int flush_color, int work_color)
2408{
2409        bool wait = false;
2410        struct pool_workqueue *pwq;
2411
2412        if (flush_color >= 0) {
2413                WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
2414                atomic_set(&wq->nr_pwqs_to_flush, 1);
2415        }
2416
2417        for_each_pwq(pwq, wq) {
2418                struct worker_pool *pool = pwq->pool;
2419
2420                spin_lock_irq(&pool->lock);
2421
2422                if (flush_color >= 0) {
2423                        WARN_ON_ONCE(pwq->flush_color != -1);
2424
2425                        if (pwq->nr_in_flight[flush_color]) {
2426                                pwq->flush_color = flush_color;
2427                                atomic_inc(&wq->nr_pwqs_to_flush);
2428                                wait = true;
2429                        }
2430                }
2431
2432                if (work_color >= 0) {
2433                        WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
2434                        pwq->work_color = work_color;
2435                }
2436
2437                spin_unlock_irq(&pool->lock);
2438        }
2439
2440        if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
2441                complete(&wq->first_flusher->done);
2442
2443        return wait;
2444}
2445
2446/**
2447 * flush_workqueue - ensure that any scheduled work has run to completion.
2448 * @wq: workqueue to flush
2449 *
2450 * This function sleeps until all work items which were queued on entry
2451 * have finished execution, but it is not livelocked by new incoming ones.
2452 */
2453void flush_workqueue(struct workqueue_struct *wq)
2454{
2455        struct wq_flusher this_flusher = {
2456                .list = LIST_HEAD_INIT(this_flusher.list),
2457                .flush_color = -1,
2458                .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
2459        };
2460        int next_color;
2461
2462        lock_map_acquire(&wq->lockdep_map);
2463        lock_map_release(&wq->lockdep_map);
2464
2465        mutex_lock(&wq->mutex);
2466
2467        /*
2468         * Start-to-wait phase
2469         */
2470        next_color = work_next_color(wq->work_color);
2471
2472        if (next_color != wq->flush_color) {
2473                /*
2474                 * Color space is not full.  The current work_color
2475                 * becomes our flush_color and work_color is advanced
2476                 * by one.
2477                 */
2478                WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
2479                this_flusher.flush_color = wq->work_color;
2480                wq->work_color = next_color;
2481
2482                if (!wq->first_flusher) {
2483                        /* no flush in progress, become the first flusher */
2484                        WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
2485
2486                        wq->first_flusher = &this_flusher;
2487
2488                        if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
2489                                                       wq->work_color)) {
2490                                /* nothing to flush, done */
2491                                wq->flush_color = next_color;
2492                                wq->first_flusher = NULL;
2493                                goto out_unlock;
2494                        }
2495                } else {
2496                        /* wait in queue */
2497                        WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
2498                        list_add_tail(&this_flusher.list, &wq->flusher_queue);
2499                        flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
2500                }
2501        } else {
2502                /*
2503                 * Oops, color space is full, wait on overflow queue.
2504                 * The next flush completion will assign us
2505                 * flush_color and transfer to flusher_queue.
2506                 */
2507                list_add_tail(&this_flusher.list, &wq->flusher_overflow);
2508        }
2509
2510        mutex_unlock(&wq->mutex);
2511
2512        wait_for_completion(&this_flusher.done);
2513
2514        /*
2515         * Wake-up-and-cascade phase
2516         *
2517         * First flushers are responsible for cascading flushes and
2518         * handling overflow.  Non-first flushers can simply return.
2519         */
2520        if (wq->first_flusher != &this_flusher)
2521                return;
2522
2523        mutex_lock(&wq->mutex);
2524
2525        /* we might have raced, check again with mutex held */
2526        if (wq->first_flusher != &this_flusher)
2527                goto out_unlock;
2528
2529        wq->first_flusher = NULL;
2530
2531        WARN_ON_ONCE(!list_empty(&this_flusher.list));
2532        WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
2533
2534        while (true) {
2535                struct wq_flusher *next, *tmp;
2536
2537                /* complete all the flushers sharing the current flush color */
2538                list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
2539                        if (next->flush_color != wq->flush_color)
2540                                break;
2541                        list_del_init(&next->list);
2542                        complete(&next->done);
2543                }
2544
2545                WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
2546                             wq->flush_color != work_next_color(wq->work_color));
2547
2548                /* this flush_color is finished, advance by one */
2549                wq->flush_color = work_next_color(wq->flush_color);
2550
2551                /* one color has been freed, handle overflow queue */
2552                if (!list_empty(&wq->flusher_overflow)) {
2553                        /*
2554                         * Assign the same color to all overflowed
2555                         * flushers, advance work_color and append to
2556                         * flusher_queue.  This is the start-to-wait
2557                         * phase for these overflowed flushers.
2558                         */
2559                        list_for_each_entry(tmp, &wq->flusher_overflow, list)
2560                                tmp->flush_color = wq->work_color;
2561
2562                        wq->work_color = work_next_color(wq->work_color);
2563
2564                        list_splice_tail_init(&wq->flusher_overflow,
2565                                              &wq->flusher_queue);
2566                        flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
2567                }
2568
2569                if (list_empty(&wq->flusher_queue)) {
2570                        WARN_ON_ONCE(wq->flush_color != wq->work_color);
2571                        break;
2572                }
2573
2574                /*
2575                 * Need to flush more colors.  Make the next flusher
2576                 * the new first flusher and arm pwqs.
2577                 */
2578                WARN_ON_ONCE(wq->flush_color == wq->work_color);
2579                WARN_ON_ONCE(wq->flush_color != next->flush_color);
2580
2581                list_del_init(&next->list);
2582                wq->first_flusher = next;
2583
2584                if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
2585                        break;
2586
2587                /*
2588                 * Meh... this color is already done, clear first
2589                 * flusher and repeat cascading.
2590                 */
2591                wq->first_flusher = NULL;
2592        }
2593
2594out_unlock:
2595        mutex_unlock(&wq->mutex);
2596}
2597EXPORT_SYMBOL_GPL(flush_workqueue);
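
/*
 * Usage sketch (not part of workqueue.c): waiting for everything already
 * queued on a private workqueue before tearing down the data it touches.
 * "wq_example_wq" is a hypothetical workqueue allocated elsewhere with
 * alloc_workqueue().
 */
static struct workqueue_struct *wq_example_wq;

static void wq_example_quiesce(void)
{
        /*
         * Every work item queued before this call has finished when it
         * returns; items queued afterwards are not waited for.
         */
        flush_workqueue(wq_example_wq);
}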
2598
2599/**
2600 * drain_workqueue - drain a workqueue
2601 * @wq: workqueue to drain
2602 *
2603 * Wait until the workqueue becomes empty.  While draining is in progress,
2604 * only chain queueing is allowed.  IOW, only currently pending or running
2605 * work items on @wq can queue further work items on it.  @wq is flushed
2606 * repeatedly until it becomes empty.  The number of flushing is detemined
2607 * by the depth of chaining and should be relatively short.  Whine if it
2608 * takes too long.
2609 */
2610void drain_workqueue(struct workqueue_struct *wq)
2611{
2612        unsigned int flush_cnt = 0;
2613        struct pool_workqueue *pwq;
2614
2615        /*
2616         * __queue_work() needs to test whether there are drainers, is much
2617         * hotter than drain_workqueue() and already looks at @wq->flags.
2618         * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers.
2619         */
2620        mutex_lock(&wq->mutex);
2621        if (!wq->nr_drainers++)
2622                wq->flags |= __WQ_DRAINING;
2623        mutex_unlock(&wq->mutex);
2624reflush:
2625        flush_workqueue(wq);
2626
2627        mutex_lock(&wq->mutex);
2628
2629        for_each_pwq(pwq, wq) {
2630                bool drained;
2631
2632                spin_lock_irq(&pwq->pool->lock);
2633                drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
2634                spin_unlock_irq(&pwq->pool->lock);
2635
2636                if (drained)
2637                        continue;
2638
2639                if (++flush_cnt == 10 ||
2640                    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
2641                        pr_warn("workqueue %s: drain_workqueue() isn't complete after %u tries\n",
2642                                wq->name, flush_cnt);
2643
2644                mutex_unlock(&wq->mutex);
2645                goto reflush;
2646        }
2647
2648        if (!--wq->nr_drainers)
2649                wq->flags &= ~__WQ_DRAINING;
2650        mutex_unlock(&wq->mutex);
2651}
2652EXPORT_SYMBOL_GPL(drain_workqueue);
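
/*
 * Usage sketch (not part of workqueue.c): draining a workqueue whose work
 * items may requeue themselves.  destroy_workqueue() already drains
 * internally; calling drain_workqueue() directly is only needed when the
 * queue must become empty but will be reused.  Reuses the hypothetical
 * wq_example_wq pointer from the earlier sketch.
 */
static void wq_example_quiesce_chained(void)
{
        /* flushes repeatedly until no work item requeues anything on the wq */
        drain_workqueue(wq_example_wq);
}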
2653
2654static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
2655{
2656        struct worker *worker = NULL;
2657        struct worker_pool *pool;
2658        struct pool_workqueue *pwq;
2659
2660        might_sleep();
2661
2662        local_irq_disable();
2663        pool = get_work_pool(work);
2664        if (!pool) {
2665                local_irq_enable();
2666                return false;
2667        }
2668
2669        spin_lock(&pool->lock);
2670        /* see the comment in try_to_grab_pending() with the same code */
2671        pwq = get_work_pwq(work);
2672        if (pwq) {
2673                if (unlikely(pwq->pool != pool))
2674                        goto already_gone;
2675        } else {
2676                worker = find_worker_executing_work(pool, work);
2677                if (!worker)
2678                        goto already_gone;
2679                pwq = worker->current_pwq;
2680        }
2681
2682        insert_wq_barrier(pwq, barr, work, worker);
2683        spin_unlock_irq(&pool->lock);
2684
2685        /*
2686         * If @max_active is 1 or rescuer is in use, flushing another work
2687         * item on the same workqueue may lead to deadlock.  Make sure the
2688         * flusher is not running on the same workqueue by verifying write
2689         * access.
2690         */
2691        if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)
2692                lock_map_acquire(&pwq->wq->lockdep_map);
2693        else
2694                lock_map_acquire_read(&pwq->wq->lockdep_map);
2695        lock_map_release(&pwq->wq->lockdep_map);
2696
2697        return true;
2698already_gone:
2699        spin_unlock_irq(&pool->lock);
2700        return false;
2701}
2702
2703/**
2704 * flush_work - wait for a work to finish executing the last queueing instance
2705 * @work: the work to flush
2706 *
2707 * Wait until @work has finished execution.  @work is guaranteed to be idle
2708 * on return if it hasn't been requeued since flush started.
2709 *
2710 * Return:
2711 * %true if flush_work() waited for the work to finish execution,
2712 * %false if it was already idle.
2713 */
2714bool flush_work(struct work_struct *work)
2715{
2716        struct wq_barrier barr;
2717
2718        lock_map_acquire(&work->lockdep_map);
2719        lock_map_release(&work->lockdep_map);
2720
2721        if (start_flush_work(work, &barr)) {
2722                wait_for_completion(&barr.done);
2723                destroy_work_on_stack(&barr.work);
2724                return true;
2725        } else {
2726                return false;
2727        }
2728}
2729EXPORT_SYMBOL_GPL(flush_work);
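
/*
 * Usage sketch (not part of workqueue.c): making sure a specific work item
 * has finished before freeing the object it operates on.  "wq_example_dev"
 * and its members are hypothetical.
 */
struct wq_example_dev {
        struct work_struct      reset_work;
        void                    *regs;
};

static void wq_example_dev_teardown(struct wq_example_dev *dev)
{
        /* wait for the last queueing instance of reset_work to finish */
        flush_work(&dev->reset_work);
        kfree(dev);
}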
2730
2731struct cwt_wait {
2732        wait_queue_t            wait;
2733        struct work_struct      *work;
2734};
2735
2736static int cwt_wakefn(wait_queue_t *wait, unsigned mode, int sync, void *key)
2737{
2738        struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
2739
2740        if (cwait->work != key)
2741                return 0;
2742        return autoremove_wake_function(wait, mode, sync, key);
2743}
2744
2745static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
2746{
2747        static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
2748        unsigned long flags;
2749        int ret;
2750
2751        do {
2752                ret = try_to_grab_pending(work, is_dwork, &flags);
2753                /*
2754                 * If someone else is already canceling, wait for it to
2755                 * finish.  flush_work() doesn't work for PREEMPT_NONE
2756                 * because we may get scheduled between @work's completion
2757                 * and the other canceling task resuming and clearing
2758                 * CANCELING - flush_work() will return false immediately
2759                 * as @work is no longer busy, try_to_grab_pending() will
2760                 * return -ENOENT as @work is still being canceled and the
2761                 * other canceling task won't be able to clear CANCELING as
2762                 * we're hogging the CPU.
2763                 *
2764                 * Let's wait for completion using a waitqueue.  As this
2765                 * may lead to the thundering herd problem, use a custom
2766                 * wake function which matches @work along with exclusive
2767                 * wait and wakeup.
2768                 */
2769                if (unlikely(ret == -ENOENT)) {
2770                        struct cwt_wait cwait;
2771
2772                        init_wait(&cwait.wait);
2773                        cwait.wait.func = cwt_wakefn;
2774                        cwait.work = work;
2775
2776                        prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
2777                                                  TASK_UNINTERRUPTIBLE);
2778                        if (work_is_canceling(work))
2779                                schedule();
2780                        finish_wait(&cancel_waitq, &cwait.wait);
2781                }
2782        } while (unlikely(ret < 0));
2783
2784        /* tell other tasks trying to grab @work to back off */
2785        mark_work_canceling(work);
2786        local_irq_restore(flags);
2787
2788        flush_work(work);
2789        clear_work_data(work);
2790
2791        /*
2792         * Paired with prepare_to_wait() above so that either
2793         * waitqueue_active() is visible here or !work_is_canceling() is
2794         * visible there.
2795         */
2796        smp_mb();
2797        if (waitqueue_active(&cancel_waitq))
2798                __wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
2799
2800        return ret;
2801}
2802
2803/**
2804 * cancel_work_sync - cancel a work and wait for it to finish
2805 * @work: the work to cancel
2806 *
2807 * Cancel @work and wait for its execution to finish.  This function
2808 * can be used even if the work re-queues itself or migrates to
2809 * another workqueue.  On return from this function, @work is
2810 * guaranteed to be not pending or executing on any CPU.
2811 *
2812 * cancel_work_sync(&delayed_work->work) must not be used for
2813 * delayed_work's.  Use cancel_delayed_work_sync() instead.
2814 *
2815 * The caller must ensure that the workqueue on which @work was last
2816 * queued can't be destroyed before this function returns.
2817 *
2818 * Return:
2819 * %true if @work was pending, %false otherwise.
2820 */
2821bool cancel_work_sync(struct work_struct *work)
2822{
2823        return __cancel_work_timer(work, false);
2824}
2825EXPORT_SYMBOL_GPL(cancel_work_sync);
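
/*
 * Usage sketch (not part of workqueue.c): cancel_work_sync() is usually
 * preferable to flush_work() at teardown because a still-pending instance
 * is removed instead of executed.  Reuses the hypothetical wq_example_dev
 * from the sketch above.
 */
static void wq_example_dev_remove(struct wq_example_dev *dev)
{
        /* reset_work is neither pending nor running anywhere after this */
        cancel_work_sync(&dev->reset_work);
        kfree(dev);
}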
2826
2827/**
2828 * flush_delayed_work - wait for a dwork to finish executing the last queueing
2829 * @dwork: the delayed work to flush
2830 *
2831 * Delayed timer is cancelled and the pending work is queued for
2832 * immediate execution.  Like flush_work(), this function only
2833 * considers the last queueing instance of @dwork.
2834 *
2835 * Return:
2836 * %true if flush_work() waited for the work to finish execution,
2837 * %false if it was already idle.
2838 */
2839bool flush_delayed_work(struct delayed_work *dwork)
2840{
2841        local_irq_disable();
2842        if (del_timer_sync(&dwork->timer))
2843                __queue_work(dwork->cpu, dwork->wq, &dwork->work);
2844        local_irq_enable();
2845        return flush_work(&dwork->work);
2846}
2847EXPORT_SYMBOL(flush_delayed_work);
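
/*
 * Usage sketch (not part of workqueue.c): forcing a pending delayed work to
 * run now and waiting for it, e.g. to push out buffered state before
 * reading it.  Reuses the hypothetical wq_example_poll dwork declared in an
 * earlier sketch.
 */
static void wq_example_sync_now(void)
{
        /* cancel the timer, queue immediately and wait for completion */
        flush_delayed_work(&wq_example_poll);
}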
2848
2849/**
2850 * cancel_delayed_work - cancel a delayed work
2851 * @dwork: delayed_work to cancel
2852 *
2853 * Kill off a pending delayed_work.
2854 *
2855 * Return: %true if @dwork was pending and canceled; %false if it wasn't
2856 * pending.
2857 *
2858 * Note:
2859 * The work callback function may still be running on return, unless
2860 * it returns %true and the work doesn't re-arm itself.  Explicitly flush or
2861 * use cancel_delayed_work_sync() to wait on it.
2862 *
2863 * This function is safe to call from any context including IRQ handler.
2864 */
2865bool cancel_delayed_work(struct delayed_work *dwork)
2866{
2867        unsigned long flags;
2868        int ret;
2869
2870        do {
2871                ret = try_to_grab_pending(&dwork->work, true, &flags);
2872        } while (unlikely(ret == -EAGAIN));
2873
2874        if (unlikely(ret < 0))
2875                return false;
2876
2877        set_work_pool_and_clear_pending(&dwork->work,
2878                                        get_work_pool_id(&dwork->work));
2879        local_irq_restore(flags);
2880        return ret;
2881}
2882EXPORT_SYMBOL(cancel_delayed_work);
2883
2884/**
2885 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
2886 * @dwork: the delayed work to cancel
2887 *
2888 * This is cancel_work_sync() for delayed works.
2889 *
2890 * Return:
2891 * %true if @dwork was pending, %false otherwise.
2892 */
2893bool cancel_delayed_work_sync(struct delayed_work *dwork)
2894{
2895        return __cancel_work_timer(&dwork->work, true);
2896}
2897EXPORT_SYMBOL(cancel_delayed_work_sync);
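
/*
 * Usage sketch (not part of workqueue.c): stopping the self-rearming poller
 * from the earlier hypothetical sketch.  cancel_delayed_work_sync() kills a
 * pending timer/queue instance, waits for a running callback, and copes
 * with the callback requeueing itself.
 */
static void wq_example_poll_stop(void)
{
        cancel_delayed_work_sync(&wq_example_poll);
}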
2898
2899/**
2900 * schedule_on_each_cpu - execute a function synchronously on each online CPU
2901 * @func: the function to call
2902 *
2903 * schedule_on_each_cpu() executes @func on each online CPU using the
2904 * system workqueue and blocks until all CPUs have completed.
2905 * schedule_on_each_cpu() is very slow.
2906 *
2907 * Return:
2908 * 0 on success, -errno on failure.
2909 */
2910int schedule_on_each_cpu(work_func_t func)
2911{
2912        int cpu;
2913        struct work_struct __percpu *works;
2914
2915        works = alloc_percpu(struct work_struct);
2916        if (!works)
2917                return -ENOMEM;
2918
2919        get_online_cpus();
2920
2921        for_each_online_cpu(cpu) {
2922                struct work_struct *work = per_cpu_ptr(works, cpu);
2923
2924                INIT_WORK(work, func);
2925                schedule_work_on(cpu, work);
2926        }
2927
2928        for_each_online_cpu(cpu)
2929                flush_work(per_cpu_ptr(works, cpu));
2930
2931        put_online_cpus();
2932        free_percpu(works);
2933        return 0;
2934}
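
/*
 * Caller sketch (illustrative; the my_* names are hypothetical): bump a
 * per-cpu counter once on every online CPU and wait for all of them.
 * The call sleeps and must be made from process context.
 *
 *	static DEFINE_PER_CPU(unsigned long, my_samples);
 *
 *	static void my_sample_fn(struct work_struct *work)
 *	{
 *		this_cpu_inc(my_samples);
 *	}
 *
 *	int err = schedule_on_each_cpu(my_sample_fn);
 *	if (err)
 *		pr_err("per-cpu sampling failed: %d\n", err);
 */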
2935
2936/**
2937 * flush_scheduled_work - ensure that any scheduled work has run to completion.
2938 *
2939 * Forces execution of the kernel-global workqueue and blocks until its
2940 * completion.
2941 *
2942 * Think twice before calling this function!  It's very easy to get into
2943 * trouble if you don't take great care.  Either of the following situations
2944 * will lead to deadlock:
2945 *
2946 *      One of the work items currently on the workqueue needs to acquire
2947 *      a lock held by your code or its caller.
2948 *
2949 *      Your code is running in the context of a work routine.
2950 *
2951 * They will be detected by lockdep when they occur, but the first might not
2952 * occur very often.  It depends on what work items are on the workqueue and
2953 * what locks they need, which you have no control over.
2954 *
2955 * In most situations flushing the entire workqueue is overkill; you merely
2956 * need to know that a particular work item isn't queued and isn't running.
2957 * In such cases you should use cancel_delayed_work_sync() or
2958 * cancel_work_sync() instead.
2959 */
2960void flush_scheduled_work(void)
2961{
2962        flush_workqueue(system_wq);
2963}
2964EXPORT_SYMBOL(flush_scheduled_work);
2965
2966/**
2967 * execute_in_process_context - reliably execute the routine with user context
2968 * @fn:         the function to execute
2969 * @ew:         guaranteed storage for the execute work structure (must
2970 *              be available when the work executes)
2971 *
2972 * Executes the function immediately if process context is available,
2973 * otherwise schedules the function for delayed execution.
2974 *
2975 * Return:      0 - function was executed
2976 *              1 - function was scheduled for execution
2977 */
2978int execute_in_process_context(work_func_t fn, struct execute_work *ew)
2979{
2980        if (!in_interrupt()) {
2981                fn(&ew->work);
2982                return 0;
2983        }
2984
2985        INIT_WORK(&ew->work, fn);
2986        schedule_work(&ew->work);
2987
2988        return 1;
2989}
2990EXPORT_SYMBOL_GPL(execute_in_process_context);
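
/*
 * Usage sketch (illustrative; the my_* names are hypothetical): @ew must
 * outlive a possibly deferred execution, so it is typically embedded in
 * the object the callback operates on rather than placed on the stack.
 *
 *	struct my_dev {
 *		struct execute_work release_ew;
 *	};
 *
 *	static void my_release_fn(struct work_struct *work)
 *	{
 *		struct my_dev *dev = container_of(work, struct my_dev,
 *						  release_ew.work);
 *		kfree(dev);
 *	}
 *
 *	execute_in_process_context(my_release_fn, &dev->release_ew);
 */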
2991
2992#ifdef CONFIG_SYSFS
2993/*
2994 * Workqueues with the WQ_SYSFS flag set are visible to userland via
2995 * /sys/bus/workqueue/devices/WQ_NAME.  All visible workqueues have the
2996 * following attributes.
2997 *
2998 *  per_cpu     RO bool : whether the workqueue is per-cpu or unbound
2999 *  max_active  RW int  : maximum number of in-flight work items
3000 *
3001 * Unbound workqueues have the following extra attributes.
3002 *
3003 *  id          RO int  : the associated pool ID
3004 *  nice        RW int  : nice value of the workers
3005 *  cpumask     RW mask : bitmask of allowed CPUs for the workers
3006 */
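
/*
 * Exposure sketch (illustrative; the "my_wq" name is hypothetical): an
 * unbound workqueue allocated with WQ_SYSFS appears under
 * /sys/bus/workqueue/devices/my_wq with the attributes listed above.
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = alloc_workqueue("my_wq", WQ_UNBOUND | WQ_SYSFS, 0);
 */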
3007struct wq_device {
3008        struct workqueue_struct         *wq;
3009        struct device                   dev;
3010};
3011
3012static struct workqueue_struct *dev_to_wq(struct device *dev)
3013{
3014        struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
3015
3016        return wq_dev->wq;
3017}
3018
3019static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr,
3020                            char *buf)
3021{
3022        struct workqueue_struct *wq = dev_to_wq(dev);
3023
3024        return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
3025}
3026static DEVICE_ATTR_RO(per_cpu);
3027
3028static ssize_t max_active_show(struct device *dev,
3029                               struct device_attribute *attr, char *buf)
3030{
3031        struct workqueue_struct *wq = dev_to_wq(dev);
3032
3033        return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
3034}
3035
3036static ssize_t max_active_store(struct device *dev,
3037                                struct device_attribute *attr, const char *buf,
3038                                size_t count)
3039{
3040        struct workqueue_struct *wq = dev_to_wq(dev);
3041        int val;
3042
3043        if (sscanf(buf, "%d", &val) != 1 || val <= 0)
3044                return -EINVAL;
3045
3046        workqueue_set_max_active(wq, val);
3047        return count;
3048}
3049static DEVICE_ATTR_RW(max_active);
3050
3051static struct attribute *wq_sysfs_attrs[] = {
3052        &dev_attr_per_cpu.attr,
3053        &dev_attr_max_active.attr,
3054        NULL,
3055};
3056ATTRIBUTE_GROUPS(wq_sysfs);
3057
3058static ssize_t wq_pool_ids_show(struct device *dev,
3059                                struct device_attribute *attr, char *buf)
3060{
3061        struct workqueue_struct *wq = dev_to_wq(dev);
3062        const char *delim = "";
3063        int node, written = 0;
3064
3065        rcu_read_lock_sched();
3066        for_each_node(node) {
3067                written += scnprintf(buf + written, PAGE_SIZE - written,
3068                                     "%s%d:%d", delim, node,
3069                                     unbound_pwq_by_node(wq, node)->pool->id);
3070                delim = " ";
3071        }
3072        written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
3073        rcu_read_unlock_sched();
3074
3075        return written;
3076}
3077
3078static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
3079                            char *buf)
3080{
3081        struct workqueue_struct *wq = dev_to_wq(dev);
3082        int written;
3083
3084        mutex_lock(&wq->mutex);
3085        written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice);
3086        mutex_unlock(&wq->mutex);
3087
3088        return written;
3089}
3090
3091/* prepare workqueue_attrs for sysfs store operations */
3092static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
3093{
3094        struct workqueue_attrs *attrs;
3095
3096        attrs = alloc_workqueue_attrs(GFP_KERNEL);
3097        if (!attrs)
3098                return NULL;
3099
3100        mutex_lock(&wq->mutex);
3101        copy_workqueue_attrs(attrs, wq->unbound_attrs);
3102        mutex_unlock(&wq->mutex);
3103        return attrs;
3104}
3105
3106static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
3107                             const char *buf, size_t count)
3108{
3109        struct workqueue_struct *wq = dev_to_wq(dev);
3110        struct workqueue_attrs *attrs;
3111        int ret;
3112
3113        attrs = wq_sysfs_prep_attrs(wq);
3114        if (!attrs)
3115                return -ENOMEM;
3116
3117        if (sscanf(buf, "%d", &attrs->nice) == 1 &&
3118            attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE)
3119                ret = apply_workqueue_attrs(wq, attrs);
3120        else
3121                ret = -EINVAL;
3122
3123        free_workqueue_attrs(attrs);
3124        return ret ?: count;
3125}
3126
3127static ssize_t wq_cpumask_show(struct device *dev,
3128                               struct device_attribute *attr, char *buf)
3129{
3130        struct workqueue_struct *wq = dev_to_wq(dev);
3131        int written;
3132
3133        mutex_lock(&wq->mutex);
3134        written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
3135                            cpumask_pr_args(wq->unbound_attrs->cpumask));
3136        mutex_unlock(&wq->mutex);
3137        return written;
3138}
3139
3140static ssize_t wq_cpumask_store(struct device *dev,
3141                                struct device_attribute *attr,
3142                                const char *buf, size_t count)
3143{
3144        struct workqueue_struct *wq = dev_to_wq(dev);
3145        struct workqueue_attrs *attrs;
3146        int ret;
3147
3148        attrs = wq_sysfs_prep_attrs(wq);
3149        if (!attrs)
3150                return -ENOMEM;
3151
3152        ret = cpumask_parse(buf, attrs->cpumask);
3153        if (!ret)
3154                ret = apply_workqueue_attrs(wq, attrs);
3155
3156        free_workqueue_attrs(attrs);
3157        return ret ?: count;
3158}
3159
3160static ssize_t wq_numa_show(struct device *dev, struct device_attribute *attr,
3161                            char *buf)
3162{
3163        struct workqueue_struct *wq = dev_to_wq(dev);
3164        int written;
3165
3166        mutex_lock(&wq->mutex);
3167        written = scnprintf(buf, PAGE_SIZE, "%d\n",
3168                            !wq->unbound_attrs->no_numa);
3169        mutex_unlock(&wq->mutex);
3170
3171        return written;
3172}
3173
3174static ssize_t wq_numa_store(struct device *dev, struct device_attribute *attr,
3175                             const char *buf, size_t count)
3176{
3177        struct workqueue_struct *wq = dev_to_wq(dev);
3178        struct workqueue_attrs *attrs;
3179        int v, ret;
3180
3181        attrs = wq_sysfs_prep_attrs(wq);
3182        if (!attrs)
3183                return -ENOMEM;
3184
3185        ret = -EINVAL;
3186        if (sscanf(buf, "%d", &v) == 1) {
3187                attrs->no_numa = !v;
3188                ret = apply_workqueue_attrs(wq, attrs);
3189        }
3190
3191        free_workqueue_attrs(attrs);
3192        return ret ?: count;
3193}
3194
3195static struct device_attribute wq_sysfs_unbound_attrs[] = {
3196        __ATTR(pool_ids, 0444, wq_pool_ids_show, NULL),
3197        __ATTR(nice, 0644, wq_nice_show, wq_nice_store),
3198        __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
3199        __ATTR(numa, 0644, wq_numa_show, wq_numa_store),
3200        __ATTR_NULL,
3201};
3202
3203static struct bus_type wq_subsys = {
3204        .name                           = "workqueue",
3205        .dev_groups                     = wq_sysfs_groups,
3206};
3207
3208static int __init wq_sysfs_init(void)
3209{
3210        return subsys_virtual_register(&wq_subsys, NULL);
3211}
3212core_initcall(wq_sysfs_init);
3213
3214static void wq_device_release(struct device *dev)
3215{
3216        struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
3217
3218        kfree(wq_dev);
3219}
3220
3221/**
3222 * workqueue_sysfs_register - make a workqueue visible in sysfs
3223 * @wq: the workqueue to register
3224 *
3225 * Expose @wq in sysfs under /sys/bus/workqueue/devices.
3226 * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set,
3227 * which is the preferred method.
3228 *
3229 * A workqueue user should use this function directly iff it wants to apply
3230 * workqueue_attrs before making the workqueue visible in sysfs; otherwise,
3231 * apply_workqueue_attrs() may race against userland updating the
3232 * attributes.
3233 *
3234 * Return: 0 on success, -errno on failure.
3235 */
3236int workqueue_sysfs_register(struct workqueue_struct *wq)
3237{
3238        struct wq_device *wq_dev;
3239        int ret;
3240
3241        /*
3242         * Adjusting max_active or creating new pwqs by applying
3243         * attributes breaks ordering guarantee.  Disallow exposing ordered
3244         * workqueues.
3245         */
3246        if (WARN_ON(wq->flags & __WQ_ORDERED))
3247                return -EINVAL;
3248
3249        wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
3250        if (!wq_dev)
3251                return -ENOMEM;
3252
3253        wq_dev->wq = wq;
3254        wq_dev->dev.bus = &wq_subsys;
3255        wq_dev->dev.init_name = wq->name;
3256        wq_dev->dev.release = wq_device_release;
3257
3258        /*
3259         * unbound_attrs are created separately.  Suppress uevent until
3260         * everything is ready.
3261         */
3262        dev_set_uevent_suppress(&wq_dev->dev, true);
3263
3264        ret = device_register(&wq_dev->dev);
3265        if (ret) {
3266                kfree(wq_dev);
3267                wq->wq_dev = NULL;
3268                return ret;
3269        }
3270
3271        if (wq->flags & WQ_UNBOUND) {
3272                struct device_attribute *attr;
3273
3274                for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) {
3275                        ret = device_create_file(&wq_dev->dev, attr);
3276                        if (ret) {
3277                                device_unregister(&wq_dev->dev);
3278                                wq->wq_dev = NULL;
3279                                return ret;
3280                        }
3281                }
3282        }
3283
3284        dev_set_uevent_suppress(&wq_dev->dev, false);
3285        kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
3286        return 0;
3287}
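
/*
 * Ordering sketch (illustrative; "my_wq" and "my_attrs" are
 * hypothetical): a user that wants attributes applied before the
 * workqueue becomes visible allocates it without WQ_SYSFS and registers
 * it manually afterwards.
 *
 *	my_wq = alloc_workqueue("my_wq", WQ_UNBOUND, 0);
 *	apply_workqueue_attrs(my_wq, my_attrs);
 *	workqueue_sysfs_register(my_wq);
 */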
3288
3289/**
3290 * workqueue_sysfs_unregister - undo workqueue_sysfs_register()
3291 * @wq: the workqueue to unregister
3292 *
3293 * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister.
3294 */
3295static void workqueue_sysfs_unregister(struct workqueue_struct *wq)
3296{
3297        struct wq_device *wq_dev = wq->wq_dev;
3298
3299        if (!wq->wq_dev)
3300                return;
3301
3302        wq->wq_dev = NULL;
3303        device_unregister(&wq_dev->dev);
3304}
3305#else   /* CONFIG_SYSFS */
3306static void workqueue_sysfs_unregister(struct workqueue_struct *wq)     { }
3307#endif  /* CONFIG_SYSFS */
3308
3309/**
3310 * free_workqueue_attrs - free a workqueue_attrs
3311 * @attrs: workqueue_attrs to free
3312 *
3313 * Undo alloc_workqueue_attrs().
3314 */
3315void free_workqueue_attrs(struct workqueue_attrs *attrs)
3316{
3317        if (attrs) {
3318                free_cpumask_var(attrs->cpumask);
3319                kfree(attrs);
3320        }
3321}
3322
3323/**
3324 * alloc_workqueue_attrs - allocate a workqueue_attrs
3325 * @gfp_mask: allocation mask to use
3326 *
3327 * Allocate a new workqueue_attrs, initialize with default settings and
3328 * return it.
3329 *
3330 * Return: The allocated new workqueue_attrs on success. %NULL on failure.
3331 */
3332struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask)
3333{
3334        struct workqueue_attrs *attrs;
3335
3336        attrs = kzalloc(sizeof(*attrs), gfp_mask);
3337        if (!attrs)
3338                goto fail;
3339        if (!alloc_cpumask_var(&attrs->cpumask, gfp_mask))
3340                goto fail;
3341
3342        cpumask_copy(attrs->cpumask, cpu_possible_mask);
3343        return attrs;
3344fail:
3345        free_workqueue_attrs(attrs);
3346        return NULL;
3347}
3348
3349static void copy_workqueue_attrs(struct workqueue_attrs *to,
3350                                 const struct workqueue_attrs *from)
3351{
3352        to->nice = from->nice;
3353        cpumask_copy(to->cpumask, from->cpumask);
3354        /*
3355         * Unlike hash and equality test, this function doesn't ignore
3356         * ->no_numa as it is used for both pool and wq attrs.  Instead,
3357         * get_unbound_pool() explicitly clears ->no_numa after copying.
3358         */
3359        to->no_numa = from->no_numa;
3360}
3361
3362/* hash value of the content of @attr */
3363static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
3364{
3365        u32 hash = 0;
3366
3367        hash = jhash_1word(attrs->nice, hash);
3368        hash = jhash(cpumask_bits(attrs->cpumask),
3369                     BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
3370        return hash;
3371}
3372
3373/* content equality test */
3374static bool wqattrs_equal(const struct workqueue_attrs *a,
3375                          const struct workqueue_attrs *b)
3376{
3377        if (a->nice != b->nice)
3378                return false;
3379        if (!cpumask_equal(a->cpumask, b->cpumask))
3380                return false;
3381        return true;
3382}
3383
3384/**
3385 * init_worker_pool - initialize a newly zalloc'd worker_pool
3386 * @pool: worker_pool to initialize
3387 *
3388 * Initialize a newly zalloc'd @pool.  It also allocates @pool->attrs.
3389 *
3390 * Return: 0 on success, -errno on failure.  Even on failure, all fields
3391 * inside @pool proper are initialized and put_unbound_pool() can be called
3392 * on @pool safely to release it.
3393 */
3394static int init_worker_pool(struct worker_pool *pool)
3395{
3396        spin_lock_init(&pool->lock);
3397        pool->id = -1;
3398        pool->cpu = -1;
3399        pool->node = NUMA_NO_NODE;
3400        pool->flags |= POOL_DISASSOCIATED;
3401        INIT_LIST_HEAD(&pool->worklist);
3402        INIT_LIST_HEAD(&pool->idle_list);
3403        hash_init(pool->busy_hash);
3404
3405        init_timer_deferrable(&pool->idle_timer);
3406        pool->idle_timer.function = idle_worker_timeout;
3407        pool->idle_timer.data = (unsigned long)pool;
3408
3409        setup_timer(&pool->mayday_timer, pool_mayday_timeout,
3410                    (unsigned long)pool);
3411
3412        mutex_init(&pool->manager_arb);
3413        mutex_init(&pool->attach_mutex);
3414        INIT_LIST_HEAD(&pool->workers);
3415
3416        ida_init(&pool->worker_ida);
3417        INIT_HLIST_NODE(&pool->hash_node);
3418        pool->refcnt = 1;
3419
3420        /* shouldn't fail above this point */
3421        pool->attrs = alloc_workqueue_attrs(GFP_KERNEL);
3422        if (!pool->attrs)
3423                return -ENOMEM;
3424        return 0;
3425}
3426
3427static void rcu_free_pool(struct rcu_head *rcu)
3428{
3429        struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
3430
3431        ida_destroy(&pool->worker_ida);
3432        free_workqueue_attrs(pool->attrs);
3433        kfree(pool);
3434}
3435
3436/**
3437 * put_unbound_pool - put a worker_pool
3438 * @pool: worker_pool to put
3439 *
3440 * Put @pool.  If its refcnt reaches zero, it gets destroyed in a sched-RCU
3441 * safe manner.  get_unbound_pool() calls this function on its failure path
3442 * and this function should be able to release pools which went through,
3443 * successfully or not, init_worker_pool().
3444 *
3445 * Should be called with wq_pool_mutex held.
3446 */
3447static void put_unbound_pool(struct worker_pool *pool)
3448{
3449        DECLARE_COMPLETION_ONSTACK(detach_completion);
3450        struct worker *worker;
3451
3452        lockdep_assert_held(&wq_pool_mutex);
3453
3454        if (--pool->refcnt)
3455                return;
3456
3457        /* sanity checks */
3458        if (WARN_ON(!(pool->cpu < 0)) ||
3459            WARN_ON(!list_empty(&pool->worklist)))
3460                return;
3461
3462        /* release id and unhash */
3463        if (pool->id >= 0)
3464                idr_remove(&worker_pool_idr, pool->id);
3465        hash_del(&pool->hash_node);
3466
3467        /*
3468         * Become the manager and destroy all workers.  Grabbing
3469         * manager_arb prevents @pool's workers from blocking on
3470         * attach_mutex.
3471         */
3472        mutex_lock(&pool->manager_arb);
3473
3474        spin_lock_irq(&pool->lock);
3475        while ((worker = first_idle_worker(pool)))
3476                destroy_worker(worker);
3477        WARN_ON(pool->nr_workers || pool->nr_idle);
3478        spin_unlock_irq(&pool->lock);
3479
3480        mutex_lock(&pool->attach_mutex);
3481        if (!list_empty(&pool->workers))
3482                pool->detach_completion = &detach_completion;
3483        mutex_unlock(&pool->attach_mutex);
3484
3485        if (pool->detach_completion)
3486                wait_for_completion(pool->detach_completion);
3487
3488        mutex_unlock(&pool->manager_arb);
3489
3490        /* shut down the timers */
3491        del_timer_sync(&pool->idle_timer);
3492        del_timer_sync(&pool->mayday_timer);
3493
3494        /* sched-RCU protected to allow dereferences from get_work_pool() */
3495        call_rcu_sched(&pool->rcu, rcu_free_pool);
3496}
3497
3498/**
3499 * get_unbound_pool - get a worker_pool with the specified attributes
3500 * @attrs: the attributes of the worker_pool to get
3501 *
3502 * Obtain a worker_pool which has the same attributes as @attrs, bump the
3503 * reference count and return it.  If there already is a matching
3504 * worker_pool, it will be used; otherwise, this function attempts to
3505 * create a new one.
3506 *
3507 * Should be called with wq_pool_mutex held.
3508 *
3509 * Return: On success, a worker_pool with the same attributes as @attrs.
3510 * On failure, %NULL.
3511 */
3512static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
3513{
3514        u32 hash = wqattrs_hash(attrs);
3515        struct worker_pool *pool;
3516        int node;
3517
3518        lockdep_assert_held(&wq_pool_mutex);
3519
3520        /* do we already have a matching pool? */
3521        hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
3522                if (wqattrs_equal(pool->attrs, attrs)) {
3523                        pool->refcnt++;
3524                        return pool;
3525                }
3526        }
3527
3528        /* nope, create a new one */
3529        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
3530        if (!pool || init_worker_pool(pool) < 0)
3531                goto fail;
3532
3533        lockdep_set_subclass(&pool->lock, 1);   /* see put_pwq() */
3534        copy_workqueue_attrs(pool->attrs, attrs);
3535
3536        /*
3537         * no_numa isn't a worker_pool attribute, always clear it.  See
3538         * 'struct workqueue_attrs' comments for detail.
3539         */
3540        pool->attrs->no_numa = false;
3541
3542        /* if cpumask is contained inside a NUMA node, we belong to that node */
3543        if (wq_numa_enabled) {
3544                for_each_node(node) {
3545                        if (cpumask_subset(pool->attrs->cpumask,
3546                                           wq_numa_possible_cpumask[node])) {
3547                                pool->node = node;
3548                                break;
3549                        }
3550                }
3551        }
3552
3553        if (worker_pool_assign_id(pool) < 0)
3554                goto fail;
3555
3556        /* create and start the initial worker */
3557        if (!create_worker(pool))
3558                goto fail;
3559
3560        /* install */
3561        hash_add(unbound_pool_hash, &pool->hash_node, hash);
3562
3563        return pool;
3564fail:
3565        if (pool)
3566                put_unbound_pool(pool);
3567        return NULL;
3568}
3569
3570static void rcu_free_pwq(struct rcu_head *rcu)
3571{
3572        kmem_cache_free(pwq_cache,
3573                        container_of(rcu, struct pool_workqueue, rcu));
3574}
3575
3576/*
3577 * Scheduled on system_wq by put_pwq() when an unbound pwq hits zero refcnt
3578 * and needs to be destroyed.
3579 */
3580static void pwq_unbound_release_workfn(struct work_struct *work)
3581{
3582        struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
3583                                                  unbound_release_work);
3584        struct workqueue_struct *wq = pwq->wq;
3585        struct worker_pool *pool = pwq->pool;
3586        bool is_last;
3587
3588        if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
3589                return;
3590
3591        mutex_lock(&wq->mutex);
3592        list_del_rcu(&pwq->pwqs_node);
3593        is_last = list_empty(&wq->pwqs);
3594        mutex_unlock(&wq->mutex);
3595
3596        mutex_lock(&wq_pool_mutex);
3597        put_unbound_pool(pool);
3598        mutex_unlock(&wq_pool_mutex);
3599
3600        call_rcu_sched(&pwq->rcu, rcu_free_pwq);
3601
3602        /*
3603         * If we're the last pwq going away, @wq is already dead and no one
3604         * is gonna access it anymore.  Free it.
3605         */
3606        if (is_last) {
3607                free_workqueue_attrs(wq->unbound_attrs);
3608                kfree(wq);
3609        }
3610}
3611
3612/**
3613 * pwq_adjust_max_active - update a pwq's max_active to the current setting
3614 * @pwq: target pool_workqueue
3615 *
3616 * If @pwq isn't freezing, set @pwq->max_active to the associated
3617 * workqueue's saved_max_active and activate delayed work items
3618 * accordingly.  If @pwq is freezing, clear @pwq->max_active to zero.
3619 */
3620static void pwq_adjust_max_active(struct pool_workqueue *pwq)
3621{
3622        struct workqueue_struct *wq = pwq->wq;
3623        bool freezable = wq->flags & WQ_FREEZABLE;
3624
3625        /* for @wq->saved_max_active */
3626        lockdep_assert_held(&wq->mutex);
3627
3628        /* fast exit for non-freezable wqs */
3629        if (!freezable && pwq->max_active == wq->saved_max_active)
3630                return;
3631
3632        spin_lock_irq(&pwq->pool->lock);
3633
3634        /*
3635         * During [un]freezing, the caller is responsible for ensuring that
3636         * this function is called at least once after @workqueue_freezing
3637         * is updated and visible.
3638         */
3639        if (!freezable || !workqueue_freezing) {
3640                pwq->max_active = wq->saved_max_active;
3641
3642                while (!list_empty(&pwq->delayed_works) &&
3643                       pwq->nr_active < pwq->max_active)
3644                        pwq_activate_first_delayed(pwq);
3645
3646                /*
3647                 * Need to kick a worker after thawed or an unbound wq's
3648                 * max_active is bumped.  It's a slow path.  Do it always.
3649                 */
3650                wake_up_worker(pwq->pool);
3651        } else {
3652                pwq->max_active = 0;
3653        }
3654
3655        spin_unlock_irq(&pwq->pool->lock);
3656}
3657
3658/* initialize newly alloced @pwq which is associated with @wq and @pool */
3659static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
3660                     struct worker_pool *pool)
3661{
3662        BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
3663
3664        memset(pwq, 0, sizeof(*pwq));
3665
3666        pwq->pool = pool;
3667        pwq->wq = wq;
3668        pwq->flush_color = -1;
3669        pwq->refcnt = 1;
3670        INIT_LIST_HEAD(&pwq->delayed_works);
3671        INIT_LIST_HEAD(&pwq->pwqs_node);
3672        INIT_LIST_HEAD(&pwq->mayday_node);
3673        INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
3674}
3675
3676/* sync @pwq with the current state of its associated wq and link it */
3677static void link_pwq(struct pool_workqueue *pwq)
3678{
3679        struct workqueue_struct *wq = pwq->wq;
3680
3681        lockdep_assert_held(&wq->mutex);
3682
3683        /* may be called multiple times, ignore if already linked */
3684        if (!list_empty(&pwq->pwqs_node))
3685                return;
3686
3687        /* set the matching work_color */
3688        pwq->work_color = wq->work_color;
3689
3690        /* sync max_active to the current setting */
3691        pwq_adjust_max_active(pwq);
3692
3693        /* link in @pwq */
3694        list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
3695}
3696
3697/* obtain a pool matching @attrs and create a pwq associating the pool and @wq */
3698static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
3699                                        const struct workqueue_attrs *attrs)
3700{
3701        struct worker_pool *pool;
3702        struct pool_workqueue *pwq;
3703
3704        lockdep_assert_held(&wq_pool_mutex);
3705
3706        pool = get_unbound_pool(attrs);
3707        if (!pool)
3708                return NULL;
3709
3710        pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
3711        if (!pwq) {
3712                put_unbound_pool(pool);
3713                return NULL;
3714        }
3715
3716        init_pwq(pwq, wq, pool);
3717        return pwq;
3718}
3719
3720/* undo alloc_unbound_pwq(), used only in the error path */
3721static void free_unbound_pwq(struct pool_workqueue *pwq)
3722{
3723        lockdep_assert_held(&wq_pool_mutex);
3724
3725        if (pwq) {
3726                put_unbound_pool(pwq->pool);
3727                kmem_cache_free(pwq_cache, pwq);
3728        }
3729}
3730
3731/**
3732 * wq_calc_node_cpumask - calculate a wq_attrs' cpumask for the specified node
3733 * @attrs: the wq_attrs of interest
3734 * @node: the target NUMA node
3735 * @cpu_going_down: if >= 0, the CPU to consider as offline
3736 * @cpumask: outarg, the resulting cpumask
3737 *
3738 * Calculate the cpumask a workqueue with @attrs should use on @node.  If
3739 * @cpu_going_down is >= 0, that cpu is considered offline during
3740 * calculation.  The result is stored in @cpumask.
3741 *
3742 * If NUMA affinity is not enabled, @attrs->cpumask is always used.  If
3743 * enabled and @node has online CPUs requested by @attrs, the returned
3744 * cpumask is the intersection of the possible CPUs of @node and
3745 * @attrs->cpumask.
3746 *
3747 * The caller is responsible for ensuring that the cpumask of @node stays
3748 * stable.
3749 *
3750 * Return: %true if the resulting @cpumask is different from @attrs->cpumask,
3751 * %false if equal.
3752 */
3753static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
3754                                 int cpu_going_down, cpumask_t *cpumask)
3755{
3756        if (!wq_numa_enabled || attrs->no_numa)
3757                goto use_dfl;
3758
3759        /* does @node have any online CPUs @attrs wants? */
3760        cpumask_and(cpumask, cpumask_of_node(node), attrs->cpumask);
3761        if (cpu_going_down >= 0)
3762                cpumask_clear_cpu(cpu_going_down, cpumask);
3763
3764        if (cpumask_empty(cpumask))
3765                goto use_dfl;
3766
3767        /* yeap, return possible CPUs in @node that @attrs wants */
3768        cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]);
3769        return !cpumask_equal(cpumask, attrs->cpumask);
3770
3771use_dfl:
3772        cpumask_copy(cpumask, attrs->cpumask);
3773        return false;
3774}
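
/*
 * Worked example (illustrative numbers): assume NUMA affinity is enabled,
 * node 1's possible CPUs are 4-7 and @attrs->cpumask is 0-5.  If CPU 4 or
 * 5 is online, the result is 4-5 and the function returns %true.  If
 * neither is online (or the last one is @cpu_going_down), the result
 * falls back to 0-5 and the function returns %false.
 */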
3775
3776/* install @pwq into @wq's numa_pwq_tbl[] for @node and return the old pwq */
3777static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
3778                                                   int node,
3779                                                   struct pool_workqueue *pwq)
3780{
3781        struct pool_workqueue *old_pwq;
3782
3783        lockdep_assert_held(&wq->mutex);
3784
3785        /* link_pwq() can handle duplicate calls */
3786        link_pwq(pwq);
3787
3788        old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
3789        rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
3790        return old_pwq;
3791}
3792
3793/**
3794 * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
3795 * @wq: the target workqueue
3796 * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
3797 *
3798 * Apply @attrs to an unbound workqueue @wq.  Unless disabled, on NUMA
3799 * machines, this function maps a separate pwq to each NUMA node with
3800 * possible CPUs in @attrs->cpumask so that work items are affine to the
3801 * NUMA node they were issued on.  Older pwqs are released as in-flight work
3802 * items finish.  Note that a work item which repeatedly requeues itself
3803 * back-to-back will stay on its current pwq.
3804 *
3805 * Performs GFP_KERNEL allocations.
3806 *
3807 * Return: 0 on success and -errno on failure.
3808 */
3809int apply_workqueue_attrs(struct workqueue_struct *wq,
3810                          const struct workqueue_attrs *attrs)
3811{
3812        struct workqueue_attrs *new_attrs, *tmp_attrs;
3813        struct pool_workqueue **pwq_tbl, *dfl_pwq;
3814        int node, ret;
3815
3816        /* only unbound workqueues can change attributes */
3817        if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
3818                return -EINVAL;
3819
3820        /* creating multiple pwqs breaks ordering guarantee */
3821        if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
3822                return -EINVAL;
3823
3824        pwq_tbl = kzalloc(nr_node_ids * sizeof(pwq_tbl[0]), GFP_KERNEL);
3825        new_attrs = alloc_workqueue_attrs(GFP_KERNEL);
3826        tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL);
3827        if (!pwq_tbl || !new_attrs || !tmp_attrs)
3828                goto enomem;
3829
3830        /* make a copy of @attrs and sanitize it */
3831        copy_workqueue_attrs(new_attrs, attrs);
3832        cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
3833
3834        /*
3835         * We may create multiple pwqs with differing cpumasks.  Make a
3836         * copy of @new_attrs which will be modified and used to obtain
3837         * pools.
3838         */
3839        copy_workqueue_attrs(tmp_attrs, new_attrs);
3840
3841        /*
3842         * CPUs should stay stable across pwq creations and installations.
3843         * Pin CPUs, determine the target cpumask for each node and create
3844         * pwqs accordingly.
3845         */
3846        get_online_cpus();
3847
3848        mutex_lock(&wq_pool_mutex);
3849
3850        /*
3851         * If something goes wrong during CPU up/down, we'll fall back to
3852         * the default pwq covering whole @attrs->cpumask.  Always create
3853         * it even if we don't use it immediately.
3854         */
3855        dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
3856        if (!dfl_pwq)
3857                goto enomem_pwq;
3858
3859        for_each_node(node) {
3860                if (wq_calc_node_cpumask(attrs, node, -1, tmp_attrs->cpumask)) {
3861                        pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs);
3862                        if (!pwq_tbl[node])
3863                                goto enomem_pwq;
3864                } else {
3865                        dfl_pwq->refcnt++;
3866                        pwq_tbl[node] = dfl_pwq;
3867                }
3868        }
3869
3870        mutex_unlock(&wq_pool_mutex);
3871
3872        /* all pwqs have been created successfully, let's install'em */
3873        mutex_lock(&wq->mutex);
3874
3875        copy_workqueue_attrs(wq->unbound_attrs, new_attrs);
3876
3877        /* save the previous pwq and install the new one */
3878        for_each_node(node)
3879                pwq_tbl[node] = numa_pwq_tbl_install(wq, node, pwq_tbl[node]);
3880
3881        /* @dfl_pwq might not have been used, ensure it's linked */
3882        link_pwq(dfl_pwq);
3883        swap(wq->dfl_pwq, dfl_pwq);
3884
3885        mutex_unlock(&wq->mutex);
3886
3887        /* put the old pwqs */
3888        for_each_node(node)
3889                put_pwq_unlocked(pwq_tbl[node]);
3890        put_pwq_unlocked(dfl_pwq);
3891
3892        put_online_cpus();
3893        ret = 0;
3894        /* fall through */
3895out_free:
3896        free_workqueue_attrs(tmp_attrs);
3897        free_workqueue_attrs(new_attrs);
3898        kfree(pwq_tbl);
3899        return ret;
3900
3901enomem_pwq:
3902        free_unbound_pwq(dfl_pwq);
3903        for_each_node(node)
3904                if (pwq_tbl && pwq_tbl[node] != dfl_pwq)
3905                        free_unbound_pwq(pwq_tbl[node]);
3906        mutex_unlock(&wq_pool_mutex);
3907        put_online_cpus();
3908enomem:
3909        ret = -ENOMEM;
3910        goto out_free;
3911}
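
/*
 * Usage sketch (illustrative; "my_wq" and the attribute values are
 * hypothetical): lower the nice value of an unbound workqueue's workers
 * and restrict them to CPUs 0 and 1.
 *
 *	struct workqueue_attrs *attrs;
 *
 *	attrs = alloc_workqueue_attrs(GFP_KERNEL);
 *	if (attrs) {
 *		attrs->nice = -10;
 *		cpumask_clear(attrs->cpumask);
 *		cpumask_set_cpu(0, attrs->cpumask);
 *		cpumask_set_cpu(1, attrs->cpumask);
 *		apply_workqueue_attrs(my_wq, attrs);
 *		free_workqueue_attrs(attrs);
 *	}
 */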
3912
3913/**
3914 * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
3915 * @wq: the target workqueue
3916 * @cpu: the CPU coming up or going down
3917 * @online: whether @cpu is coming up or going down
3918 *
3919 * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
3920 * %CPU_DOWN_FAILED.  @cpu is being hot[un]plugged, update NUMA affinity of
3921 * @wq accordingly.
3922 *
3923 * If NUMA affinity can't be adjusted due to memory allocation failure, it
3924 * falls back to @wq->dfl_pwq which may not be optimal but is always
3925 * correct.
3926 *
3927 * Note that when the last allowed CPU of a NUMA node goes offline for a
3928 * workqueue with a cpumask spanning multiple nodes, the workers which were
3929 * already executing the work items for the workqueue will lose their CPU
3930 * affinity and may execute on any CPU.  This is similar to how per-cpu
3931 * workqueues behave on CPU_DOWN.  If a workqueue user wants strict
3932 * affinity, it's the user's responsibility to flush the work item from
3933 * CPU_DOWN_PREPARE.
3934 */
3935static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
3936                                   bool online)
3937{
3938        int node = cpu_to_node(cpu);
3939        int cpu_off = online ? -1 : cpu;
3940        struct pool_workqueue *old_pwq = NULL, *pwq;
3941        struct workqueue_attrs *target_attrs;
3942        cpumask_t *cpumask;
3943
3944        lockdep_assert_held(&wq_pool_mutex);
3945
3946        if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND))
3947                return;
3948
3949        /*
3950         * We don't wanna alloc/free wq_attrs for each wq for each CPU.
3951         * Let's use a preallocated one.  The following buf is protected by
3952         * CPU hotplug exclusion.
3953         */
3954        target_attrs = wq_update_unbound_numa_attrs_buf;
3955        cpumask = target_attrs->cpumask;
3956
3957        mutex_lock(&wq->mutex);
3958        if (wq->unbound_attrs->no_numa)
3959                goto out_unlock;
3960
3961        copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
3962        pwq = unbound_pwq_by_node(wq, node);
3963
3964        /*
3965         * Let's determine what needs to be done.  If the target cpumask is
3966         * different from wq's, we need to compare it to @pwq's and create
3967         * a new one if they don't match.  If the target cpumask equals
3968         * wq's, the default pwq should be used.
3969         */
3970        if (wq_calc_node_cpumask(wq->unbound_attrs, node, cpu_off, cpumask)) {
3971                if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask))
3972                        goto out_unlock;
3973        } else {
3974                goto use_dfl_pwq;
3975        }
3976
3977        mutex_unlock(&wq->mutex);
3978
3979        /* create a new pwq */
3980        pwq = alloc_unbound_pwq(wq, target_attrs);
3981        if (!pwq) {
3982                pr_warn("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n",
3983                        wq->name);
3984                mutex_lock(&wq->mutex);
3985                goto use_dfl_pwq;
3986        }
3987
3988        /*
3989         * Install the new pwq.  As this function is called only from CPU
3990         * hotplug callbacks and applying a new attrs is wrapped with
3991         * get/put_online_cpus(), @wq->unbound_attrs couldn't have changed
3992         * in between.
3993         */
3994        mutex_lock(&wq->mutex);
3995        old_pwq = numa_pwq_tbl_install(wq, node, pwq);
3996        goto out_unlock;
3997
3998use_dfl_pwq:
3999        spin_lock_irq(&wq->dfl_pwq->pool->lock);
4000        get_pwq(wq->dfl_pwq);
4001        spin_unlock_irq(&wq->dfl_pwq->pool->lock);
4002        old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
4003out_unlock:
4004        mutex_unlock(&wq->mutex);
4005        put_pwq_unlocked(old_pwq);
4006}
4007
4008static int alloc_and_link_pwqs(struct workqueue_struct *wq)
4009{
4010        bool highpri = wq->flags & WQ_HIGHPRI;
4011        int cpu, ret;
4012
4013        if (!(wq->flags & WQ_UNBOUND)) {
4014                wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
4015                if (!wq->cpu_pwqs)
4016                        return -ENOMEM;
4017
4018                for_each_possible_cpu(cpu) {
4019                        struct pool_workqueue *pwq =
4020                                per_cpu_ptr(wq->cpu_pwqs, cpu);
4021                        struct worker_pool *cpu_pools =
4022                                per_cpu(cpu_worker_pools, cpu);
4023
4024                        init_pwq(pwq, wq, &cpu_pools[highpri]);
4025
4026                        mutex_lock(&wq->mutex);
4027                        link_pwq(pwq);
4028                        mutex_unlock(&wq->mutex);
4029                }
4030                return 0;
4031        } else if (wq->flags & __WQ_ORDERED) {
4032                ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
4033                /* there should only be single pwq for ordering guarantee */
4034                WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
4035                              wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
4036                     "ordering guarantee broken for workqueue %s\n", wq->name);
4037                return ret;
4038        } else {
4039                return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
4040        }
4041}
4042
4043static int wq_clamp_max_active(int max_active, unsigned int flags,
4044                               const char *name)
4045{
4046        int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
4047
4048        if (max_active < 1 || max_active > lim)
4049                pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
4050                        max_active, name, 1, lim);
4051
4052        return clamp_val(max_active, 1, lim);
4053}
4054
4055struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
4056                                               unsigned int flags,
4057                                               int max_active,
4058                                               struct lock_class_key *key,
4059                                               const char *lock_name, ...)
4060{
4061        size_t tbl_size = 0;
4062        va_list args;
4063        struct workqueue_struct *wq;
4064        struct pool_workqueue *pwq;
4065
4066        /* see the comment above the definition of WQ_POWER_EFFICIENT */
4067        if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
4068                flags |= WQ_UNBOUND;
4069
4070        /* allocate wq and format name */
4071        if (flags & WQ_UNBOUND)
4072                tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]);
4073
4074        wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL);
4075        if (!wq)
4076                return NULL;
4077
4078        if (flags & WQ_UNBOUND) {
4079                wq->unbound_attrs = alloc_workqueue_attrs(GFP_KERNEL);
4080                if (!wq->unbound_attrs)
4081                        goto err_free_wq;
4082        }
4083
4084        va_start(args, lock_name);
4085        vsnprintf(wq->name, sizeof(wq->name), fmt, args);
4086        va_end(args);
4087
4088        max_active = max_active ?: WQ_DFL_ACTIVE;
4089        max_active = wq_clamp_max_active(max_active, flags, wq->name);
4090
4091        /* init wq */
4092        wq->flags = flags;
4093        wq->saved_max_active = max_active;
4094        mutex_init(&wq->mutex);
4095        atomic_set(&wq->nr_pwqs_to_flush, 0);
4096        INIT_LIST_HEAD(&wq->pwqs);
4097        INIT_LIST_HEAD(&wq->flusher_queue);
4098        INIT_LIST_HEAD(&wq->flusher_overflow);
4099        INIT_LIST_HEAD(&wq->maydays);
4100
4101        lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
4102        INIT_LIST_HEAD(&wq->list);
4103
4104        if (alloc_and_link_pwqs(wq) < 0)
4105                goto err_free_wq;
4106
4107        /*
4108         * Workqueues which may be used during memory reclaim should
4109         * have a rescuer to guarantee forward progress.
4110         */
4111        if (flags & WQ_MEM_RECLAIM) {
4112                struct worker *rescuer;
4113
4114                rescuer = alloc_worker(NUMA_NO_NODE);
4115                if (!rescuer)
4116                        goto err_destroy;
4117
4118                rescuer->rescue_wq = wq;
4119                rescuer->task = kthread_create(rescuer_thread, rescuer, "%s",
4120                                               wq->name);
4121                if (IS_ERR(rescuer->task)) {
4122                        kfree(rescuer);
4123                        goto err_destroy;
4124                }
4125
4126                wq->rescuer = rescuer;
4127                rescuer->task->flags |= PF_NO_SETAFFINITY;
4128                wake_up_process(rescuer->task);
4129        }
4130
4131        if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
4132                goto err_destroy;
4133
4134        /*
4135         * wq_pool_mutex protects global freeze state and workqueues list.
4136         * Grab it, adjust max_active and add the new @wq to workqueues
4137         * list.
4138         */
4139        mutex_lock(&wq_pool_mutex);
4140
4141        mutex_lock(&wq->mutex);
4142        for_each_pwq(pwq, wq)
4143                pwq_adjust_max_active(pwq);
4144        mutex_unlock(&wq->mutex);
4145
4146        list_add(&wq->list, &workqueues);
4147
4148        mutex_unlock(&wq_pool_mutex);
4149
4150        return wq;
4151
4152err_free_wq:
4153        free_workqueue_attrs(wq->unbound_attrs);
4154        kfree(wq);
4155        return NULL;
4156err_destroy:
4157        destroy_workqueue(wq);
4158        return NULL;
4159}
4160EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
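
/*
 * Allocation sketch (illustrative; "my_wq" is hypothetical): callers use
 * the alloc_workqueue() wrapper rather than this function directly.
 * Workqueues used on the memory-reclaim path should pass WQ_MEM_RECLAIM
 * so that a rescuer thread backs them.
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = alloc_workqueue("my_wq", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
 *	if (!my_wq)
 *		return -ENOMEM;
 *
 * destroy_workqueue() tears it down again after draining pending work.
 */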
4161
4162/**
4163 * destroy_workqueue - safely terminate a workqueue
4164 * @wq: target workqueue
4165 *
4166 * Safely destroy a workqueue. All work currently pending will be done first.
4167 */
4168void destroy_workqueue(struct workqueue_struct *wq)
4169{
4170        struct pool_workqueue *pwq;
4171        int node;
4172
4173        /* drain it before proceeding with destruction */
4174        drain_workqueue(wq);
4175
4176        /* sanity checks */
4177        mutex_lock(&wq->mutex);
4178        for_each_pwq(pwq, wq) {
4179                int i;
4180
4181                for (i = 0; i < WORK_NR_COLORS; i++) {
4182                        if (WARN_ON(pwq->nr_in_flight[i])) {
4183                                mutex_unlock(&wq->mutex);
4184                                return;
4185                        }
4186                }
4187
4188                if (WARN_ON((pwq != wq->dfl_pwq) && (pwq->refcnt > 1)) ||
4189                    WARN_ON(pwq->nr_active) ||
4190                    WARN_ON(!list_empty(&pwq->delayed_works))) {
4191                        mutex_unlock(&wq->mutex);
4192                        return;
4193                }
4194        }
4195        mutex_unlock(&wq->mutex);
4196
4197        /*
4198         * wq list is used to freeze wq, remove from list after
4199         * flushing is complete in case freeze races us.
4200         */
4201        mutex_lock(&wq_pool_mutex);
4202        list_del_init(&wq->list);
4203        mutex_unlock(&wq_pool_mutex);
4204
4205        workqueue_sysfs_unregister(wq);
4206
4207        if (wq->rescuer) {
4208                kthread_stop(wq->rescuer->task);
4209                kfree(wq->rescuer);
4210                wq->rescuer = NULL;
4211        }
4212
4213        if (!(wq->flags & WQ_UNBOUND)) {
4214                /*
4215                 * The base ref is never dropped on per-cpu pwqs.  Directly
4216                 * free the pwqs and wq.
4217                 */
4218                free_percpu(wq->cpu_pwqs);
4219                kfree(wq);
4220        } else {
4221                /*
4222                 * We're the sole accessor of @wq at this point.  Directly
4223                 * access numa_pwq_tbl[] and dfl_pwq to put the base refs.
4224                 * @wq will be freed when the last pwq is released.
4225                 */
4226                for_each_node(node) {
4227                        pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
4228                        RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL);
4229                        put_pwq_unlocked(pwq);
4230                }
4231
4232                /*
4233                 * Put dfl_pwq.  @wq may be freed any time after dfl_pwq is
4234                 * put.  Don't access it afterwards.
4235                 */
4236                pwq = wq->dfl_pwq;
4237                wq->dfl_pwq = NULL;
4238                put_pwq_unlocked(pwq);
4239        }
4240}
4241EXPORT_SYMBOL_GPL(destroy_workqueue);
4242
4243/**
4244 * workqueue_set_max_active - adjust max_active of a workqueue
4245 * @wq: target workqueue
4246 * @max_active: new max_active value.
4247 *
4248 * Set max_active of @wq to @max_active.
4249 *
4250 * CONTEXT:
4251 * Don't call from IRQ context.
4252 */
4253void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
4254{
4255        struct pool_workqueue *pwq;
4256
4257        /* disallow meddling with max_active for ordered workqueues */
4258        if (WARN_ON(wq->flags & __WQ_ORDERED))
4259                return;
4260
4261        max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
4262
4263        mutex_lock(&wq->mutex);
4264
4265        wq->saved_max_active = max_active;
4266
4267        for_each_pwq(pwq, wq)
4268                pwq_adjust_max_active(pwq);
4269
4270        mutex_unlock(&wq->mutex);
4271}
4272EXPORT_SYMBOL_GPL(workqueue_set_max_active);
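
/*
 * Usage sketch (illustrative; "my_wq" is hypothetical): widen or throttle
 * concurrency at runtime, e.g. from a module parameter handler running
 * in process context.
 *
 *	workqueue_set_max_active(my_wq, 16);
 */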
4273
4274/**
4275 * current_is_workqueue_rescuer - is %current workqueue rescuer?
4276 *
4277 * Determine whether %current is a workqueue rescuer.  Can be used from
4278 * work functions to determine whether it's being run off the rescuer task.
4279 *
4280 * Return: %true if %current is a workqueue rescuer. %false otherwise.
4281 */
4282bool current_is_workqueue_rescuer(void)
4283{
4284        struct worker *worker = current_wq_worker();
4285
4286        return worker && worker->rescue_wq;
4287}
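
/*
 * Usage sketch (illustrative; the my_* names are hypothetical): a work
 * function on a WQ_MEM_RECLAIM workqueue may want to shrink optional
 * batching when it is being run by the rescuer, the last-resort
 * execution context.
 *
 *	static void my_reclaim_fn(struct work_struct *work)
 *	{
 *		int batch = current_is_workqueue_rescuer() ? 1 : 16;
 *
 *		my_process_batch(batch);
 *	}
 */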
4288
4289/**
4290 * workqueue_congested - test whether a workqueue is congested
4291 * @cpu: CPU in question
4292 * @wq: target workqueue
4293 *
4294 * Test whether @wq's cpu workqueue for @cpu is congested.  There is
4295 * no synchronization around this function and the test result is
4296 * unreliable and only useful as advisory hints or for debugging.
4297 *
4298 * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
4299 * Note that both per-cpu and unbound workqueues may be associated with
4300 * multiple pool_workqueues which have separate congested states.  A
4301 * workqueue being congested on one CPU doesn't mean the workqueue is also
4302 * congested on other CPUs / NUMA nodes.
4303 *
4304 * Return:
4305 * %true if congested, %false otherwise.
4306 */
4307bool workqueue_congested(int cpu, struct workqueue_struct *wq)
4308{
4309        struct pool_workqueue *pwq;
4310        bool ret;
4311
4312        rcu_read_lock_sched();
4313
4314        if (cpu == WORK_CPU_UNBOUND)
4315                cpu = smp_processor_id();
4316
4317        if (!(wq->flags & WQ_UNBOUND))
4318                pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
4319        else
4320                pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
4321
4322        ret = !list_empty(&pwq->delayed_works);
4323        rcu_read_unlock_sched();
4324
4325        return ret;
4326}
4327EXPORT_SYMBOL_GPL(workqueue_congested);
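
/*
 * Advisory-use sketch (illustrative; the my_* names are hypothetical):
 * since the result is unsynchronized, only use it to bias behaviour,
 * e.g. skipping low-value work when the local pwq looks backed up.
 *
 *	if (!workqueue_congested(WORK_CPU_UNBOUND, my_wq))
 *		queue_work(my_wq, &my_stats_work);
 */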
4328
4329/**
4330 * work_busy - test whether a work is currently pending or running
4331 * @work: the work to be tested
4332 *
4333 * Test whether @work is currently pending or running.  There is no
4334 * synchronization around this function and the test result is
4335 * unreliable and only useful as advisory hints or for debugging.
4336 *
4337 * Return:
4338 * OR'd bitmask of WORK_BUSY_* bits.
4339 */
4340unsigned int work_busy(struct work_struct *work)
4341{
4342        struct worker_pool *pool;
4343        unsigned long flags;
4344        unsigned int ret = 0;
4345
4346        if (work_pending(work))
4347                ret |= WORK_BUSY_PENDING;
4348
4349        local_irq_save(flags);
4350        pool = get_work_pool(work);
4351        if (pool) {
4352                spin_lock(&pool->lock);
4353                if (find_worker_executing_work(pool, work))
4354                        ret |= WORK_BUSY_RUNNING;
4355                spin_unlock(&pool->lock);
4356        }
4357        local_irq_restore(flags);
4358
4359        return ret;
4360}
4361EXPORT_SYMBOL_GPL(work_busy);
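
/*
 * Advisory-use sketch (illustrative; "my_log_work" is hypothetical): the
 * returned bits are only a hint, e.g. for a debugfs status dump.
 *
 *	unsigned int busy = work_busy(&my_log_work);
 *
 *	pr_debug("log work:%s%s\n",
 *		 busy & WORK_BUSY_PENDING ? " pending" : "",
 *		 busy & WORK_BUSY_RUNNING ? " running" : "");
 */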
4362
4363/**
4364 * set_worker_desc - set description for the current work item
4365 * @fmt: printf-style format string
4366 * @...: arguments for the format string
4367 *
4368 * This function can be called by a running work function to describe what
4369 * the work item is about.  If the worker task gets dumped, this
4370 * information will be printed out together to help debugging.  The
4371 * description can be at most WORKER_DESC_LEN including the trailing '\0'.
4372 */
4373void set_worker_desc(const char *fmt, ...)
4374{
4375        struct worker *worker = current_wq_worker();
4376        va_list args;
4377
4378        if (worker) {
4379                va_start(args, fmt);
4380                vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
4381                va_end(args);
4382                worker->desc_valid = true;
4383        }
4384}
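
/*
 * Usage sketch (illustrative; the my_* identifiers are hypothetical): a
 * work function can record what it is processing so that a later dump of
 * the worker task is more informative.
 *
 *	static void my_writeback_fn(struct work_struct *work)
 *	{
 *		struct my_req *req = container_of(work, struct my_req, work);
 *
 *		set_worker_desc("writeback inode %lu", req->ino);
 *		my_do_writeback(req);
 *	}
 */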
4385
4386/**
4387 * print_worker_info - print out worker information and description
4388 * @log_lvl: the log level to use when printing
4389 * @task: target task
4390 *
4391 * If @task is a worker and currently executing a work item, print out the
4392 * name of the workqueue being serviced and worker description set with
4393 * set_worker_desc() by the currently executing work item.
4394 *
4395 * This function can be safely called on any task as long as the
4396 * task_struct itself is accessible.  While safe, this function isn't
4397 * synchronized and may print out mix-ups or garbage of limited length.
4398 */
4399void print_worker_info(const char *log_lvl, struct task_struct *task)
4400{
4401        work_func_t *fn = NULL;
4402        char name[WQ_NAME_LEN] = { };
4403        char desc[WORKER_DESC_LEN] = { };
4404        struct pool_workqueue *pwq = NULL;
4405        struct workqueue_struct *wq = NULL;
4406        bool desc_valid = false;
4407        struct worker *worker;
4408
4409        if (!(task->flags & PF_WQ_WORKER))
4410                return;
4411
4412        /*
4413         * This function is called without any synchronization and @task
4414         * could be in any state.  Be careful with dereferences.
4415         */
4416        worker = probe_kthread_data(task);
4417
4418        /*
4419         * Carefully copy the associated workqueue's workfn and name.  Keep
4420         * the original last '\0' in case the original contains garbage.
4421         */
4422        probe_kernel_read(&fn, &worker->current_func, sizeof(fn));
4423        probe_kernel_read(&pwq, &worker->current_pwq, sizeof(pwq));
4424        probe_kernel_read(&wq, &pwq->wq, sizeof(wq));
4425        probe_kernel_read(name, wq->name, sizeof(name) - 1);
4426
4427        /* copy worker description */
4428        probe_kernel_read(&desc_valid, &worker->desc_valid, sizeof(desc_valid));
4429        if (desc_valid)
4430                probe_kernel_read(desc, worker->desc, sizeof(desc) - 1);
4431
4432        if (fn || name[0] || desc[0]) {
4433                printk("%sWorkqueue: %s %pf", log_lvl, name, fn);
4434                if (desc[0])
4435                        pr_cont(" (%s)", desc);
4436                pr_cont("\n");
4437        }
4438}
4439
4440/*
4441 * CPU hotplug.
4442 *
4443 * There are two challenges in supporting CPU hotplug.  Firstly, there
4444 * are a lot of assumptions on strong associations among work, pwq and
4445 * pool which make migrating pending and scheduled works very
4446 * difficult to implement without impacting hot paths.  Secondly,
4447 * worker pools serve a mix of short, long and very long running work items,
4448 * making blocked draining impractical.
4449 *
4450 * This is solved by allowing the pools to be disassociated from the CPU,
4451 * running as unbound ones, and allowing them to be reattached later if the
4452 * CPU comes back online.
4453 */
4454
4455static void wq_unbind_fn(struct work_struct *work)
4456{
4457        int cpu = smp_processor_id();
4458        struct worker_pool *pool;
4459        struct worker *worker;
4460
4461        for_each_cpu_worker_pool(pool, cpu) {
4462                mutex_lock(&pool->attach_mutex);
4463                spin_lock_irq(&pool->lock);
4464
4465                /*
4466                 * We've blocked all attach/detach operations. Make all workers
4467                 * unbound and set DISASSOCIATED.  Before this, all workers
4468                 * except for the ones which are still executing works from
4469                 * before the last CPU down must be on the cpu.  After
4470                 * this, they may become diasporas.
4471                 */
4472                for_each_pool_worker(worker, pool)
4473                        worker->flags |= WORKER_UNBOUND;
4474
4475                pool->flags |= POOL_DISASSOCIATED;
4476
4477                spin_unlock_irq(&pool->lock);
4478                mutex_unlock(&pool->attach_mutex);
4479
4480                /*
4481                 * Call schedule() so that we cross rq->lock and thus can
4482                 * guarantee sched callbacks see the %WORKER_UNBOUND flag.
4483                 * This is necessary as scheduler callbacks may be invoked
4484                 * from other cpus.
4485                 */
4486                schedule();
4487
4488                /*
4489                 * Sched callbacks are disabled now.  Zap nr_running.
4490                 * After this, nr_running stays zero and need_more_worker()
4491                 * and keep_working() are always true as long as the
4492                 * worklist is not empty.  This pool now behaves as an
4493                 * unbound (in terms of concurrency management) pool which
4494                 * is served by workers tied to the pool.
4495                 */
4496                atomic_set(&pool->nr_running, 0);
4497
4498                /*
4499                 * With concurrency management just turned off, a busy
4500                 * worker blocking could lead to lengthy stalls.  Kick off
4501                 * unbound chain execution of currently pending work items.
4502                 */
4503                spin_lock_irq(&pool->lock);
4504                wake_up_worker(pool);
4505                spin_unlock_irq(&pool->lock);
4506        }
4507}
4508
4509/**
4510 * rebind_workers - rebind all workers of a pool to the associated CPU
4511 * @pool: pool of interest
4512 *
4513 * @pool->cpu is coming online.  Rebind all workers to the CPU.
4514 */
4515static void rebind_workers(struct worker_pool *pool)
4516{
4517        struct worker *worker;
4518
4519        lockdep_assert_held(&pool->attach_mutex);
4520
4521        /*
4522         * Restore CPU affinity of all workers.  As all idle workers should
4523         * be on the run-queue of the associated CPU before any local
4524         * wake-ups for concurrency management happen, restore CPU affinity
4525         * of all workers first and then clear UNBOUND.  As we're called
4526         * from CPU_ONLINE, the following shouldn't fail.
4527         */
4528        for_each_pool_worker(worker, pool)
4529                WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
4530                                                  pool->attrs->cpumask) < 0);
4531
4532        spin_lock_irq(&pool->lock);
4533        pool->flags &= ~POOL_DISASSOCIATED;
4534
4535        for_each_pool_worker(worker, pool) {
4536                unsigned int worker_flags = worker->flags;
4537
4538                /*
4539                 * A bound idle worker should actually be on the runqueue
4540                 * of the associated CPU for local wake-ups targeting it to
4541                 * work.  Kick all idle workers so that they migrate to the
4542                 * associated CPU.  Doing this in the same loop as
4543                 * replacing UNBOUND with REBOUND is safe as no worker will
4544                 * be bound before @pool->lock is released.
4545                 */
4546                if (worker_flags & WORKER_IDLE)
4547                        wake_up_process(worker->task);
4548
4549                /*
4550                 * We want to clear UNBOUND but can't directly call
4551                 * worker_clr_flags() or adjust nr_running.  Atomically
4552                 * replace UNBOUND with another NOT_RUNNING flag REBOUND.
4553                 * @worker will clear REBOUND using worker_clr_flags() when
4554                 * it initiates the next execution cycle thus restoring
4555                 * concurrency management.  Note that when or whether
4556                 * @worker clears REBOUND doesn't affect correctness.
4557                 *
4558                 * ACCESS_ONCE() is necessary because @worker->flags may be
4559                 * tested without holding any lock in
4560                 * wq_worker_waking_up().  Without it, NOT_RUNNING test may
4561                 * fail incorrectly leading to premature concurrency
4562                 * management operations.
4563                 */
4564                WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
4565                worker_flags |= WORKER_REBOUND;
4566                worker_flags &= ~WORKER_UNBOUND;
4567                ACCESS_ONCE(worker->flags) = worker_flags;
4568        }
4569
4570        spin_unlock_irq(&pool->lock);
4571}
4572
4573/**
4574 * restore_unbound_workers_cpumask - restore cpumask of unbound workers
4575 * @pool: unbound pool of interest
4576 * @cpu: the CPU which is coming up
4577 *
4578 * An unbound pool may end up with a cpumask which doesn't have any online
4579 * CPUs.  When a worker of such a pool gets scheduled, the scheduler resets
4580 * its cpus_allowed.  If @cpu is in @pool's cpumask which didn't have any
4581 * online CPU before, cpus_allowed of all its workers should be restored.
4582 */
4583static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
4584{
4585        static cpumask_t cpumask;
4586        struct worker *worker;
4587
4588        lockdep_assert_held(&pool->attach_mutex);
4589
4590        /* is @cpu allowed for @pool? */
4591        if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
4592                return;
4593
4594        /* is @cpu the only online CPU? */
4595        cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
4596        if (cpumask_weight(&cpumask) != 1)
4597                return;
4598
4599        /* as we're called from CPU_ONLINE, the following shouldn't fail */
4600        for_each_pool_worker(worker, pool)
4601                WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
4602                                                  pool->attrs->cpumask) < 0);
4603}
4604
4605/*
4606 * Workqueues should be brought up before normal priority CPU notifiers.
4607 * This will be registered as a high priority CPU notifier.
4608 */
4609static int workqueue_cpu_up_callback(struct notifier_block *nfb,
4610                                               unsigned long action,
4611                                               void *hcpu)
4612{
4613        int cpu = (unsigned long)hcpu;
4614        struct worker_pool *pool;
4615        struct workqueue_struct *wq;
4616        int pi;
4617
4618        switch (action & ~CPU_TASKS_FROZEN) {
4619        case CPU_UP_PREPARE:
4620                for_each_cpu_worker_pool(pool, cpu) {
4621                        if (pool->nr_workers)
4622                                continue;
4623                        if (!create_worker(pool))
4624                                return NOTIFY_BAD;
4625                }
4626                break;
4627
4628        case CPU_DOWN_FAILED:
4629        case CPU_ONLINE:
4630                mutex_lock(&wq_pool_mutex);
4631
4632                for_each_pool(pool, pi) {
4633                        mutex_lock(&pool->attach_mutex);
4634
4635                        if (pool->cpu == cpu)
4636                                rebind_workers(pool);
4637                        else if (pool->cpu < 0)
4638                                restore_unbound_workers_cpumask(pool, cpu);
4639
4640                        mutex_unlock(&pool->attach_mutex);
4641                }
4642
4643                /* update NUMA affinity of unbound workqueues */
4644                list_for_each_entry(wq, &workqueues, list)
4645                        wq_update_unbound_numa(wq, cpu, true);
4646
4647                mutex_unlock(&wq_pool_mutex);
4648                break;
4649        }
4650        return NOTIFY_OK;
4651}
4652
4653/*
4654 * Workqueues should be brought down after normal priority CPU notifiers.
4655 * This will be registered as a low priority CPU notifier.
4656 */
4657static int workqueue_cpu_down_callback(struct notifier_block *nfb,
4658                                                 unsigned long action,
4659                                                 void *hcpu)
4660{
4661        int cpu = (unsigned long)hcpu;
4662        struct work_struct unbind_work;
4663        struct workqueue_struct *wq;
4664
4665        switch (action & ~CPU_TASKS_FROZEN) {
4666        case CPU_DOWN_PREPARE:
4667                /* unbinding per-cpu workers should happen on the local CPU */
4668                INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
4669                queue_work_on(cpu, system_highpri_wq, &unbind_work);
4670
4671                /* update NUMA affinity of unbound workqueues */
4672                mutex_lock(&wq_pool_mutex);
4673                list_for_each_entry(wq, &workqueues, list)
4674                        wq_update_unbound_numa(wq, cpu, false);
4675                mutex_unlock(&wq_pool_mutex);
4676
4677                /* wait for per-cpu unbinding to finish */
4678                flush_work(&unbind_work);
4679                destroy_work_on_stack(&unbind_work);
4680                break;
4681        }
4682        return NOTIFY_OK;
4683}
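/*
 * Illustrative sketch of the on-stack work pattern used above:
 * INIT_WORK_ONSTACK() + queue_work_on() + flush_work() +
 * destroy_work_on_stack() runs a function on a specific CPU in worker
 * context and waits for it to finish.  The function and variable names
 * below are hypothetical.
 *
 *	static void my_percpu_fn(struct work_struct *work)
 *	{
 *		pr_info("running on cpu%d\n", smp_processor_id());
 *	}
 *
 *	static void run_on_cpu_example(int cpu)
 *	{
 *		struct work_struct w;
 *
 *		INIT_WORK_ONSTACK(&w, my_percpu_fn);
 *		queue_work_on(cpu, system_highpri_wq, &w);
 *		flush_work(&w);
 *		destroy_work_on_stack(&w);
 *	}
 */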
4684
4685#ifdef CONFIG_SMP
4686
4687struct work_for_cpu {
4688        struct work_struct work;
4689        long (*fn)(void *);
4690        void *arg;
4691        long ret;
4692};
4693
4694static void work_for_cpu_fn(struct work_struct *work)
4695{
4696        struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
4697
4698        wfc->ret = wfc->fn(wfc->arg);
4699}
4700
4701/**
4702 * work_on_cpu - run a function in process context on a particular cpu
4703 * @cpu: the cpu to run on
4704 * @fn: the function to run
4705 * @arg: the function arg
4706 *
4707 * It is up to the caller to ensure that the cpu doesn't go offline.
4708 * The caller must not hold any locks which would prevent @fn from completing.
4709 *
4710 * Return: The value @fn returns.
4711 */
4712long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
4713{
4714        struct work_for_cpu wfc = { .fn = fn, .arg = arg };
4715
4716        INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
4717        schedule_work_on(cpu, &wfc.work);
4718        flush_work(&wfc.work);
4719        destroy_work_on_stack(&wfc.work);
4720        return wfc.ret;
4721}
4722EXPORT_SYMBOL_GPL(work_on_cpu);
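/*
 * Illustrative sketch: a caller which needs @fn to run on a specific CPU,
 * e.g. to touch a CPU-local resource, can use work_on_cpu() directly.  The
 * callback and variable names below are hypothetical.
 *
 *	static long read_local_state(void *arg)
 *	{
 *		return smp_processor_id();
 *	}
 *
 *	long cpu_seen = work_on_cpu(target_cpu, read_local_state, NULL);
 *
 * The caller is responsible for keeping @target_cpu online, e.g. via
 * get_online_cpus(), for the duration of the call.
 */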
4723#endif /* CONFIG_SMP */
4724
4725#ifdef CONFIG_FREEZER
4726
4727/**
4728 * freeze_workqueues_begin - begin freezing workqueues
4729 *
4730 * Start freezing workqueues.  After this function returns, all freezable
4731 * workqueues will queue new works to their delayed_works list instead of
4732 * pool->worklist.
4733 *
4734 * CONTEXT:
4735 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
4736 */
4737void freeze_workqueues_begin(void)
4738{
4739        struct workqueue_struct *wq;
4740        struct pool_workqueue *pwq;
4741
4742        mutex_lock(&wq_pool_mutex);
4743
4744        WARN_ON_ONCE(workqueue_freezing);
4745        workqueue_freezing = true;
4746
4747        list_for_each_entry(wq, &workqueues, list) {
4748                mutex_lock(&wq->mutex);
4749                for_each_pwq(pwq, wq)
4750                        pwq_adjust_max_active(pwq);
4751                mutex_unlock(&wq->mutex);
4752        }
4753
4754        mutex_unlock(&wq_pool_mutex);
4755}
4756
4757/**
4758 * freeze_workqueues_busy - are freezable workqueues still busy?
4759 *
4760 * Check whether freezing is complete.  This function must be called
4761 * between freeze_workqueues_begin() and thaw_workqueues().
4762 *
4763 * CONTEXT:
4764 * Grabs and releases wq_pool_mutex.
4765 *
4766 * Return:
4767 * %true if some freezable workqueues are still busy.  %false if freezing
4768 * is complete.
4769 */
4770bool freeze_workqueues_busy(void)
4771{
4772        bool busy = false;
4773        struct workqueue_struct *wq;
4774        struct pool_workqueue *pwq;
4775
4776        mutex_lock(&wq_pool_mutex);
4777
4778        WARN_ON_ONCE(!workqueue_freezing);
4779
4780        list_for_each_entry(wq, &workqueues, list) {
4781                if (!(wq->flags & WQ_FREEZABLE))
4782                        continue;
4783                /*
4784                 * nr_active is monotonically decreasing.  It's safe
4785                 * to peek without lock.
4786                 */
4787                rcu_read_lock_sched();
4788                for_each_pwq(pwq, wq) {
4789                        WARN_ON_ONCE(pwq->nr_active < 0);
4790                        if (pwq->nr_active) {
4791                                busy = true;
4792                                rcu_read_unlock_sched();
4793                                goto out_unlock;
4794                        }
4795                }
4796                rcu_read_unlock_sched();
4797        }
4798out_unlock:
4799        mutex_unlock(&wq_pool_mutex);
4800        return busy;
4801}
4802
4803/**
4804 * thaw_workqueues - thaw workqueues
4805 *
4806 * Thaw workqueues.  Normal queueing is restored and all collected
4807 * frozen works are transferred to their respective pool worklists.
4808 *
4809 * CONTEXT:
4810 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
4811 */
4812void thaw_workqueues(void)
4813{
4814        struct workqueue_struct *wq;
4815        struct pool_workqueue *pwq;
4816
4817        mutex_lock(&wq_pool_mutex);
4818
4819        if (!workqueue_freezing)
4820                goto out_unlock;
4821
4822        workqueue_freezing = false;
4823
4824        /* restore max_active and repopulate worklist */
4825        list_for_each_entry(wq, &workqueues, list) {
4826                mutex_lock(&wq->mutex);
4827                for_each_pwq(pwq, wq)
4828                        pwq_adjust_max_active(pwq);
4829                mutex_unlock(&wq->mutex);
4830        }
4831
4832out_unlock:
4833        mutex_unlock(&wq_pool_mutex);
4834}
4835#endif /* CONFIG_FREEZER */
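/*
 * Illustrative sketch: the suspend/hibernation path drives the three
 * functions above roughly in the order below.  This is a simplified
 * rendering, not the actual code in kernel/power/.
 *
 *	freeze_workqueues_begin();
 *	while (freeze_workqueues_busy())
 *		msleep(10);
 *
 *	... create the system image / suspend devices ...
 *
 *	thaw_workqueues();
 */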
4836
4837static void __init wq_numa_init(void)
4838{
4839        cpumask_var_t *tbl;
4840        int node, cpu;
4841
4842        if (num_possible_nodes() <= 1)
4843                return;
4844
4845        if (wq_disable_numa) {
4846                pr_info("workqueue: NUMA affinity support disabled\n");
4847                return;
4848        }
4849
4850        wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs(GFP_KERNEL);
4851        BUG_ON(!wq_update_unbound_numa_attrs_buf);
4852
4853        /*
4854         * We want a mask of possible CPUs for each node, which isn't readily
4855         * available.  Build one from cpu_to_node(), which should have been
4856         * fully initialized by now.
4857         */
4858        tbl = kzalloc(nr_node_ids * sizeof(tbl[0]), GFP_KERNEL);
4859        BUG_ON(!tbl);
4860
4861        for_each_node(node)
4862                BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
4863                                node_online(node) ? node : NUMA_NO_NODE));
4864
4865        for_each_possible_cpu(cpu) {
4866                node = cpu_to_node(cpu);
4867                if (WARN_ON(node == NUMA_NO_NODE)) {
4868                        pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
4869                        /* happens iff arch is bonkers, let's just proceed */
4870                        return;
4871                }
4872                cpumask_set_cpu(cpu, tbl[node]);
4873        }
4874
4875        wq_numa_possible_cpumask = tbl;
4876        wq_numa_enabled = true;
4877}
4878
4879static int __init init_workqueues(void)
4880{
4881        int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
4882        int i, cpu;
4883
4884        WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
4885
4886        pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
4887
4888        cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
4889        hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
4890
4891        wq_numa_init();
4892
4893        /* initialize CPU pools */
4894        for_each_possible_cpu(cpu) {
4895                struct worker_pool *pool;
4896
4897                i = 0;
4898                for_each_cpu_worker_pool(pool, cpu) {
4899                        BUG_ON(init_worker_pool(pool));
4900                        pool->cpu = cpu;
4901                        cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
4902                        pool->attrs->nice = std_nice[i++];
4903                        pool->node = cpu_to_node(cpu);
4904
4905                        /* alloc pool ID */
4906                        mutex_lock(&wq_pool_mutex);
4907                        BUG_ON(worker_pool_assign_id(pool));
4908                        mutex_unlock(&wq_pool_mutex);
4909                }
4910        }
4911
4912        /* create the initial worker */
4913        for_each_online_cpu(cpu) {
4914                struct worker_pool *pool;
4915
4916                for_each_cpu_worker_pool(pool, cpu) {
4917                        pool->flags &= ~POOL_DISASSOCIATED;
4918                        BUG_ON(!create_worker(pool));
4919                }
4920        }
4921
4922        /* create default unbound and ordered wq attrs */
4923        for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
4924                struct workqueue_attrs *attrs;
4925
4926                BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
4927                attrs->nice = std_nice[i];
4928                unbound_std_wq_attrs[i] = attrs;
4929
4930                /*
4931                 * An ordered wq should have only one pwq as ordering is
4932                 * guaranteed by max_active which is enforced by pwqs.
4933                 * Turn off NUMA so that dfl_pwq is used for all nodes.
4934                 */
4935                BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
4936                attrs->nice = std_nice[i];
4937                attrs->no_numa = true;
4938                ordered_wq_attrs[i] = attrs;
4939        }
4940
4941        system_wq = alloc_workqueue("events", 0, 0);
4942        system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
4943        system_long_wq = alloc_workqueue("events_long", 0, 0);
4944        system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
4945                                            WQ_UNBOUND_MAX_ACTIVE);
4946        system_freezable_wq = alloc_workqueue("events_freezable",
4947                                              WQ_FREEZABLE, 0);
4948        system_power_efficient_wq = alloc_workqueue("events_power_efficient",
4949                                              WQ_POWER_EFFICIENT, 0);
4950        system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
4951                                              WQ_FREEZABLE | WQ_POWER_EFFICIENT,
4952                                              0);
4953        BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
4954               !system_unbound_wq || !system_freezable_wq ||
4955               !system_power_efficient_wq ||
4956               !system_freezable_power_efficient_wq);
4957        return 0;
4958}
4959early_initcall(init_workqueues);
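/*
 * Illustrative sketch: once init_workqueues() has run, the system
 * workqueues created above are available to the rest of the kernel.  A
 * typical user defines a work item and queues it; my_work and my_work_fn
 * are hypothetical names.
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		pr_info("deferred work running\n");
 *	}
 *	static DECLARE_WORK(my_work, my_work_fn);
 *
 *	schedule_work(&my_work);		  queued on system_wq
 *	queue_work(system_unbound_wq, &my_work);  not tied to any CPU
 */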
4960