linux/kernel/workqueue.c
<<
>>
Prefs
   1/*
   2 * kernel/workqueue.c - generic async execution with shared worker pool
   3 *
   4 * Copyright (C) 2002           Ingo Molnar
   5 *
   6 *   Derived from the taskqueue/keventd code by:
   7 *     David Woodhouse <dwmw2@infradead.org>
   8 *     Andrew Morton
   9 *     Kai Petzke <wpp@marie.physik.tu-berlin.de>
  10 *     Theodore Ts'o <tytso@mit.edu>
  11 *
  12 * Made to use alloc_percpu by Christoph Lameter.
  13 *
  14 * Copyright (C) 2010           SUSE Linux Products GmbH
  15 * Copyright (C) 2010           Tejun Heo <tj@kernel.org>
  16 *
  17 * This is the generic async execution mechanism.  Work items as are
  18 * executed in process context.  The worker pool is shared and
  19 * automatically managed.  There are two worker pools for each CPU (one for
  20 * normal work items and the other for high priority ones) and some extra
  21 * pools for workqueues which are not bound to any specific CPU - the
  22 * number of these backing pools is dynamic.
  23 *
  24 * Please read Documentation/core-api/workqueue.rst for details.
  25 */
  26
  27#include <linux/export.h>
  28#include <linux/kernel.h>
  29#include <linux/sched.h>
  30#include <linux/init.h>
  31#include <linux/signal.h>
  32#include <linux/completion.h>
  33#include <linux/workqueue.h>
  34#include <linux/slab.h>
  35#include <linux/cpu.h>
  36#include <linux/notifier.h>
  37#include <linux/kthread.h>
  38#include <linux/hardirq.h>
  39#include <linux/mempolicy.h>
  40#include <linux/freezer.h>
  41#include <linux/kallsyms.h>
  42#include <linux/debug_locks.h>
  43#include <linux/lockdep.h>
  44#include <linux/idr.h>
  45#include <linux/jhash.h>
  46#include <linux/hashtable.h>
  47#include <linux/rculist.h>
  48#include <linux/nodemask.h>
  49#include <linux/moduleparam.h>
  50#include <linux/uaccess.h>
  51
  52#include "workqueue_internal.h"
  53
  54enum {
  55        /*
  56         * worker_pool flags
  57         *
  58         * A bound pool is either associated or disassociated with its CPU.
  59         * While associated (!DISASSOCIATED), all workers are bound to the
  60         * CPU and none has %WORKER_UNBOUND set and concurrency management
  61         * is in effect.
  62         *
  63         * While DISASSOCIATED, the cpu may be offline and all workers have
  64         * %WORKER_UNBOUND set and concurrency management disabled, and may
  65         * be executing on any CPU.  The pool behaves as an unbound one.
  66         *
  67         * Note that DISASSOCIATED should be flipped only while holding
  68         * attach_mutex to avoid changing binding state while
  69         * worker_attach_to_pool() is in progress.
  70         */
  71        POOL_MANAGER_ACTIVE     = 1 << 0,       /* being managed */
  72        POOL_DISASSOCIATED      = 1 << 2,       /* cpu can't serve workers */
  73
  74        /* worker flags */
  75        WORKER_DIE              = 1 << 1,       /* die die die */
  76        WORKER_IDLE             = 1 << 2,       /* is idle */
  77        WORKER_PREP             = 1 << 3,       /* preparing to run works */
  78        WORKER_CPU_INTENSIVE    = 1 << 6,       /* cpu intensive */
  79        WORKER_UNBOUND          = 1 << 7,       /* worker is unbound */
  80        WORKER_REBOUND          = 1 << 8,       /* worker was rebound */
  81
  82        WORKER_NOT_RUNNING      = WORKER_PREP | WORKER_CPU_INTENSIVE |
  83                                  WORKER_UNBOUND | WORKER_REBOUND,
  84
  85        NR_STD_WORKER_POOLS     = 2,            /* # standard pools per cpu */
  86
  87        UNBOUND_POOL_HASH_ORDER = 6,            /* hashed by pool->attrs */
  88        BUSY_WORKER_HASH_ORDER  = 6,            /* 64 pointers */
  89
  90        MAX_IDLE_WORKERS_RATIO  = 4,            /* 1/4 of busy can be idle */
  91        IDLE_WORKER_TIMEOUT     = 300 * HZ,     /* keep idle ones for 5 mins */
  92
  93        MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
  94                                                /* call for help after 10ms
  95                                                   (min two ticks) */
  96        MAYDAY_INTERVAL         = HZ / 10,      /* and then every 100ms */
  97        CREATE_COOLDOWN         = HZ,           /* time to breath after fail */
  98
  99        /*
 100         * Rescue workers are used only on emergencies and shared by
 101         * all cpus.  Give MIN_NICE.
 102         */
 103        RESCUER_NICE_LEVEL      = MIN_NICE,
 104        HIGHPRI_NICE_LEVEL      = MIN_NICE,
 105
 106        WQ_NAME_LEN             = 24,
 107};
 108
 109/*
 110 * Structure fields follow one of the following exclusion rules.
 111 *
 112 * I: Modifiable by initialization/destruction paths and read-only for
 113 *    everyone else.
 114 *
 115 * P: Preemption protected.  Disabling preemption is enough and should
 116 *    only be modified and accessed from the local cpu.
 117 *
 118 * L: pool->lock protected.  Access with pool->lock held.
 119 *
 120 * X: During normal operation, modification requires pool->lock and should
 121 *    be done only from local cpu.  Either disabling preemption on local
 122 *    cpu or grabbing pool->lock is enough for read access.  If
 123 *    POOL_DISASSOCIATED is set, it's identical to L.
 124 *
 125 * A: pool->attach_mutex protected.
 126 *
 127 * PL: wq_pool_mutex protected.
 128 *
 129 * PR: wq_pool_mutex protected for writes.  Sched-RCU protected for reads.
 130 *
 131 * PW: wq_pool_mutex and wq->mutex protected for writes.  Either for reads.
 132 *
 133 * PWR: wq_pool_mutex and wq->mutex protected for writes.  Either or
 134 *      sched-RCU for reads.
 135 *
 136 * WQ: wq->mutex protected.
 137 *
 138 * WR: wq->mutex protected for writes.  Sched-RCU protected for reads.
 139 *
 140 * MD: wq_mayday_lock protected.
 141 */
 142
 143/* struct worker is defined in workqueue_internal.h */
 144
 145struct worker_pool {
 146        spinlock_t              lock;           /* the pool lock */
 147        int                     cpu;            /* I: the associated cpu */
 148        int                     node;           /* I: the associated node ID */
 149        int                     id;             /* I: pool ID */
 150        unsigned int            flags;          /* X: flags */
 151
 152        unsigned long           watchdog_ts;    /* L: watchdog timestamp */
 153
 154        struct list_head        worklist;       /* L: list of pending works */
 155        int                     nr_workers;     /* L: total number of workers */
 156
 157        /* nr_idle includes the ones off idle_list for rebinding */
 158        int                     nr_idle;        /* L: currently idle ones */
 159
 160        struct list_head        idle_list;      /* X: list of idle workers */
 161        struct timer_list       idle_timer;     /* L: worker idle timeout */
 162        struct timer_list       mayday_timer;   /* L: SOS timer for workers */
 163
 164        /* a workers is either on busy_hash or idle_list, or the manager */
 165        DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
 166                                                /* L: hash of busy workers */
 167
 168        /* see manage_workers() for details on the two manager mutexes */
 169        struct worker           *manager;       /* L: purely informational */
 170        struct mutex            attach_mutex;   /* attach/detach exclusion */
 171        struct list_head        workers;        /* A: attached workers */
 172        struct completion       *detach_completion; /* all workers detached */
 173
 174        struct ida              worker_ida;     /* worker IDs for task name */
 175
 176        struct workqueue_attrs  *attrs;         /* I: worker attributes */
 177        struct hlist_node       hash_node;      /* PL: unbound_pool_hash node */
 178        int                     refcnt;         /* PL: refcnt for unbound pools */
 179
 180        /*
 181         * The current concurrency level.  As it's likely to be accessed
 182         * from other CPUs during try_to_wake_up(), put it in a separate
 183         * cacheline.
 184         */
 185        atomic_t                nr_running ____cacheline_aligned_in_smp;
 186
 187        /*
 188         * Destruction of pool is sched-RCU protected to allow dereferences
 189         * from get_work_pool().
 190         */
 191        struct rcu_head         rcu;
 192} ____cacheline_aligned_in_smp;
 193
 194/*
 195 * The per-pool workqueue.  While queued, the lower WORK_STRUCT_FLAG_BITS
 196 * of work_struct->data are used for flags and the remaining high bits
 197 * point to the pwq; thus, pwqs need to be aligned at two's power of the
 198 * number of flag bits.
 199 */
 200struct pool_workqueue {
 201        struct worker_pool      *pool;          /* I: the associated pool */
 202        struct workqueue_struct *wq;            /* I: the owning workqueue */
 203        int                     work_color;     /* L: current color */
 204        int                     flush_color;    /* L: flushing color */
 205        int                     refcnt;         /* L: reference count */
 206        int                     nr_in_flight[WORK_NR_COLORS];
 207                                                /* L: nr of in_flight works */
 208        int                     nr_active;      /* L: nr of active works */
 209        int                     max_active;     /* L: max active works */
 210        struct list_head        delayed_works;  /* L: delayed works */
 211        struct list_head        pwqs_node;      /* WR: node on wq->pwqs */
 212        struct list_head        mayday_node;    /* MD: node on wq->maydays */
 213
 214        /*
 215         * Release of unbound pwq is punted to system_wq.  See put_pwq()
 216         * and pwq_unbound_release_workfn() for details.  pool_workqueue
 217         * itself is also sched-RCU protected so that the first pwq can be
 218         * determined without grabbing wq->mutex.
 219         */
 220        struct work_struct      unbound_release_work;
 221        struct rcu_head         rcu;
 222} __aligned(1 << WORK_STRUCT_FLAG_BITS);
 223
 224/*
 225 * Structure used to wait for workqueue flush.
 226 */
 227struct wq_flusher {
 228        struct list_head        list;           /* WQ: list of flushers */
 229        int                     flush_color;    /* WQ: flush color waiting for */
 230        struct completion       done;           /* flush completion */
 231};
 232
 233struct wq_device;
 234
 235/*
 236 * The externally visible workqueue.  It relays the issued work items to
 237 * the appropriate worker_pool through its pool_workqueues.
 238 */
 239struct workqueue_struct {
 240        struct list_head        pwqs;           /* WR: all pwqs of this wq */
 241        struct list_head        list;           /* PR: list of all workqueues */
 242
 243        struct mutex            mutex;          /* protects this wq */
 244        int                     work_color;     /* WQ: current work color */
 245        int                     flush_color;    /* WQ: current flush color */
 246        atomic_t                nr_pwqs_to_flush; /* flush in progress */
 247        struct wq_flusher       *first_flusher; /* WQ: first flusher */
 248        struct list_head        flusher_queue;  /* WQ: flush waiters */
 249        struct list_head        flusher_overflow; /* WQ: flush overflow list */
 250
 251        struct list_head        maydays;        /* MD: pwqs requesting rescue */
 252        struct worker           *rescuer;       /* I: rescue worker */
 253
 254        int                     nr_drainers;    /* WQ: drain in progress */
 255        int                     saved_max_active; /* WQ: saved pwq max_active */
 256
 257        struct workqueue_attrs  *unbound_attrs; /* PW: only for unbound wqs */
 258        struct pool_workqueue   *dfl_pwq;       /* PW: only for unbound wqs */
 259
 260#ifdef CONFIG_SYSFS
 261        struct wq_device        *wq_dev;        /* I: for sysfs interface */
 262#endif
 263#ifdef CONFIG_LOCKDEP
 264        struct lockdep_map      lockdep_map;
 265#endif
 266        char                    name[WQ_NAME_LEN]; /* I: workqueue name */
 267
 268        /*
 269         * Destruction of workqueue_struct is sched-RCU protected to allow
 270         * walking the workqueues list without grabbing wq_pool_mutex.
 271         * This is used to dump all workqueues from sysrq.
 272         */
 273        struct rcu_head         rcu;
 274
 275        /* hot fields used during command issue, aligned to cacheline */
 276        unsigned int            flags ____cacheline_aligned; /* WQ: WQ_* flags */
 277        struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwqs */
 278        struct pool_workqueue __rcu *numa_pwq_tbl[]; /* PWR: unbound pwqs indexed by node */
 279};
 280
 281static struct kmem_cache *pwq_cache;
 282
 283static cpumask_var_t *wq_numa_possible_cpumask;
 284                                        /* possible CPUs of each node */
 285
 286static bool wq_disable_numa;
 287module_param_named(disable_numa, wq_disable_numa, bool, 0444);
 288
 289/* see the comment above the definition of WQ_POWER_EFFICIENT */
 290static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
 291module_param_named(power_efficient, wq_power_efficient, bool, 0444);
 292
 293static bool wq_online;                  /* can kworkers be created yet? */
 294
 295static bool wq_numa_enabled;            /* unbound NUMA affinity enabled */
 296
 297/* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
 298static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
 299
 300static DEFINE_MUTEX(wq_pool_mutex);     /* protects pools and workqueues list */
 301static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
 302static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */
 303
 304static LIST_HEAD(workqueues);           /* PR: list of all workqueues */
 305static bool workqueue_freezing;         /* PL: have wqs started freezing? */
 306
 307/* PL: allowable cpus for unbound wqs and work items */
 308static cpumask_var_t wq_unbound_cpumask;
 309
 310/* CPU where unbound work was last round robin scheduled from this CPU */
 311static DEFINE_PER_CPU(int, wq_rr_cpu_last);
 312
 313/*
 314 * Local execution of unbound work items is no longer guaranteed.  The
 315 * following always forces round-robin CPU selection on unbound work items
 316 * to uncover usages which depend on it.
 317 */
 318#ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
 319static bool wq_debug_force_rr_cpu = true;
 320#else
 321static bool wq_debug_force_rr_cpu = false;
 322#endif
 323module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);
 324
 325/* the per-cpu worker pools */
 326static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools);
 327
 328static DEFINE_IDR(worker_pool_idr);     /* PR: idr of all pools */
 329
 330/* PL: hash of all unbound pools keyed by pool->attrs */
 331static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
 332
 333/* I: attributes used when instantiating standard unbound pools on demand */
 334static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
 335
 336/* I: attributes used when instantiating ordered pools on demand */
 337static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
 338
 339struct workqueue_struct *system_wq __read_mostly;
 340EXPORT_SYMBOL(system_wq);
 341struct workqueue_struct *system_highpri_wq __read_mostly;
 342EXPORT_SYMBOL_GPL(system_highpri_wq);
 343struct workqueue_struct *system_long_wq __read_mostly;
 344EXPORT_SYMBOL_GPL(system_long_wq);
 345struct workqueue_struct *system_unbound_wq __read_mostly;
 346EXPORT_SYMBOL_GPL(system_unbound_wq);
 347struct workqueue_struct *system_freezable_wq __read_mostly;
 348EXPORT_SYMBOL_GPL(system_freezable_wq);
 349struct workqueue_struct *system_power_efficient_wq __read_mostly;
 350EXPORT_SYMBOL_GPL(system_power_efficient_wq);
 351struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
 352EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
 353
 354static int worker_thread(void *__worker);
 355static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
 356
 357#define CREATE_TRACE_POINTS
 358#include <trace/events/workqueue.h>
 359
 360#define assert_rcu_or_pool_mutex()                                      \
 361        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() &&                 \
 362                         !lockdep_is_held(&wq_pool_mutex),              \
 363                         "sched RCU or wq_pool_mutex should be held")
 364
 365#define assert_rcu_or_wq_mutex(wq)                                      \
 366        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() &&                 \
 367                         !lockdep_is_held(&wq->mutex),                  \
 368                         "sched RCU or wq->mutex should be held")
 369
 370#define assert_rcu_or_wq_mutex_or_pool_mutex(wq)                        \
 371        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() &&                 \
 372                         !lockdep_is_held(&wq->mutex) &&                \
 373                         !lockdep_is_held(&wq_pool_mutex),              \
 374                         "sched RCU, wq->mutex or wq_pool_mutex should be held")
 375
 376#define for_each_cpu_worker_pool(pool, cpu)                             \
 377        for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];               \
 378             (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
 379             (pool)++)
 380
 381/**
 382 * for_each_pool - iterate through all worker_pools in the system
 383 * @pool: iteration cursor
 384 * @pi: integer used for iteration
 385 *
 386 * This must be called either with wq_pool_mutex held or sched RCU read
 387 * locked.  If the pool needs to be used beyond the locking in effect, the
 388 * caller is responsible for guaranteeing that the pool stays online.
 389 *
 390 * The if/else clause exists only for the lockdep assertion and can be
 391 * ignored.
 392 */
 393#define for_each_pool(pool, pi)                                         \
 394        idr_for_each_entry(&worker_pool_idr, pool, pi)                  \
 395                if (({ assert_rcu_or_pool_mutex(); false; })) { }       \
 396                else
 397
 398/**
 399 * for_each_pool_worker - iterate through all workers of a worker_pool
 400 * @worker: iteration cursor
 401 * @pool: worker_pool to iterate workers of
 402 *
 403 * This must be called with @pool->attach_mutex.
 404 *
 405 * The if/else clause exists only for the lockdep assertion and can be
 406 * ignored.
 407 */
 408#define for_each_pool_worker(worker, pool)                              \
 409        list_for_each_entry((worker), &(pool)->workers, node)           \
 410                if (({ lockdep_assert_held(&pool->attach_mutex); false; })) { } \
 411                else
 412
 413/**
 414 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
 415 * @pwq: iteration cursor
 416 * @wq: the target workqueue
 417 *
 418 * This must be called either with wq->mutex held or sched RCU read locked.
 419 * If the pwq needs to be used beyond the locking in effect, the caller is
 420 * responsible for guaranteeing that the pwq stays online.
 421 *
 422 * The if/else clause exists only for the lockdep assertion and can be
 423 * ignored.
 424 */
 425#define for_each_pwq(pwq, wq)                                           \
 426        list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node)          \
 427                if (({ assert_rcu_or_wq_mutex(wq); false; })) { }       \
 428                else
 429
 430#ifdef CONFIG_DEBUG_OBJECTS_WORK
 431
 432static struct debug_obj_descr work_debug_descr;
 433
 434static void *work_debug_hint(void *addr)
 435{
 436        return ((struct work_struct *) addr)->func;
 437}
 438
 439static bool work_is_static_object(void *addr)
 440{
 441        struct work_struct *work = addr;
 442
 443        return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work));
 444}
 445
 446/*
 447 * fixup_init is called when:
 448 * - an active object is initialized
 449 */
 450static bool work_fixup_init(void *addr, enum debug_obj_state state)
 451{
 452        struct work_struct *work = addr;
 453
 454        switch (state) {
 455        case ODEBUG_STATE_ACTIVE:
 456                cancel_work_sync(work);
 457                debug_object_init(work, &work_debug_descr);
 458                return true;
 459        default:
 460                return false;
 461        }
 462}
 463
 464/*
 465 * fixup_free is called when:
 466 * - an active object is freed
 467 */
 468static bool work_fixup_free(void *addr, enum debug_obj_state state)
 469{
 470        struct work_struct *work = addr;
 471
 472        switch (state) {
 473        case ODEBUG_STATE_ACTIVE:
 474                cancel_work_sync(work);
 475                debug_object_free(work, &work_debug_descr);
 476                return true;
 477        default:
 478                return false;
 479        }
 480}
 481
 482static struct debug_obj_descr work_debug_descr = {
 483        .name           = "work_struct",
 484        .debug_hint     = work_debug_hint,
 485        .is_static_object = work_is_static_object,
 486        .fixup_init     = work_fixup_init,
 487        .fixup_free     = work_fixup_free,
 488};
 489
 490static inline void debug_work_activate(struct work_struct *work)
 491{
 492        debug_object_activate(work, &work_debug_descr);
 493}
 494
 495static inline void debug_work_deactivate(struct work_struct *work)
 496{
 497        debug_object_deactivate(work, &work_debug_descr);
 498}
 499
 500void __init_work(struct work_struct *work, int onstack)
 501{
 502        if (onstack)
 503                debug_object_init_on_stack(work, &work_debug_descr);
 504        else
 505                debug_object_init(work, &work_debug_descr);
 506}
 507EXPORT_SYMBOL_GPL(__init_work);
 508
 509void destroy_work_on_stack(struct work_struct *work)
 510{
 511        debug_object_free(work, &work_debug_descr);
 512}
 513EXPORT_SYMBOL_GPL(destroy_work_on_stack);
 514
 515void destroy_delayed_work_on_stack(struct delayed_work *work)
 516{
 517        destroy_timer_on_stack(&work->timer);
 518        debug_object_free(&work->work, &work_debug_descr);
 519}
 520EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);
 521
 522#else
 523static inline void debug_work_activate(struct work_struct *work) { }
 524static inline void debug_work_deactivate(struct work_struct *work) { }
 525#endif
 526
 527/**
 528 * worker_pool_assign_id - allocate ID and assing it to @pool
 529 * @pool: the pool pointer of interest
 530 *
 531 * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
 532 * successfully, -errno on failure.
 533 */
 534static int worker_pool_assign_id(struct worker_pool *pool)
 535{
 536        int ret;
 537
 538        lockdep_assert_held(&wq_pool_mutex);
 539
 540        ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
 541                        GFP_KERNEL);
 542        if (ret >= 0) {
 543                pool->id = ret;
 544                return 0;
 545        }
 546        return ret;
 547}
 548
 549/**
 550 * unbound_pwq_by_node - return the unbound pool_workqueue for the given node
 551 * @wq: the target workqueue
 552 * @node: the node ID
 553 *
 554 * This must be called with any of wq_pool_mutex, wq->mutex or sched RCU
 555 * read locked.
 556 * If the pwq needs to be used beyond the locking in effect, the caller is
 557 * responsible for guaranteeing that the pwq stays online.
 558 *
 559 * Return: The unbound pool_workqueue for @node.
 560 */
 561static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
 562                                                  int node)
 563{
 564        assert_rcu_or_wq_mutex_or_pool_mutex(wq);
 565
 566        /*
 567         * XXX: @node can be NUMA_NO_NODE if CPU goes offline while a
 568         * delayed item is pending.  The plan is to keep CPU -> NODE
 569         * mapping valid and stable across CPU on/offlines.  Once that
 570         * happens, this workaround can be removed.
 571         */
 572        if (unlikely(node == NUMA_NO_NODE))
 573                return wq->dfl_pwq;
 574
 575        return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
 576}
 577
 578static unsigned int work_color_to_flags(int color)
 579{
 580        return color << WORK_STRUCT_COLOR_SHIFT;
 581}
 582
 583static int get_work_color(struct work_struct *work)
 584{
 585        return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
 586                ((1 << WORK_STRUCT_COLOR_BITS) - 1);
 587}
 588
 589static int work_next_color(int color)
 590{
 591        return (color + 1) % WORK_NR_COLORS;
 592}
 593
 594/*
 595 * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
 596 * contain the pointer to the queued pwq.  Once execution starts, the flag
 597 * is cleared and the high bits contain OFFQ flags and pool ID.
 598 *
 599 * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
 600 * and clear_work_data() can be used to set the pwq, pool or clear
 601 * work->data.  These functions should only be called while the work is
 602 * owned - ie. while the PENDING bit is set.
 603 *
 604 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
 605 * corresponding to a work.  Pool is available once the work has been
 606 * queued anywhere after initialization until it is sync canceled.  pwq is
 607 * available only while the work item is queued.
 608 *
 609 * %WORK_OFFQ_CANCELING is used to mark a work item which is being
 610 * canceled.  While being canceled, a work item may have its PENDING set
 611 * but stay off timer and worklist for arbitrarily long and nobody should
 612 * try to steal the PENDING bit.
 613 */
 614static inline void set_work_data(struct work_struct *work, unsigned long data,
 615                                 unsigned long flags)
 616{
 617        WARN_ON_ONCE(!work_pending(work));
 618        atomic_long_set(&work->data, data | flags | work_static(work));
 619}
 620
 621static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
 622                         unsigned long extra_flags)
 623{
 624        set_work_data(work, (unsigned long)pwq,
 625                      WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
 626}
 627
 628static void set_work_pool_and_keep_pending(struct work_struct *work,
 629                                           int pool_id)
 630{
 631        set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
 632                      WORK_STRUCT_PENDING);
 633}
 634
 635static void set_work_pool_and_clear_pending(struct work_struct *work,
 636                                            int pool_id)
 637{
 638        /*
 639         * The following wmb is paired with the implied mb in
 640         * test_and_set_bit(PENDING) and ensures all updates to @work made
 641         * here are visible to and precede any updates by the next PENDING
 642         * owner.
 643         */
 644        smp_wmb();
 645        set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
 646        /*
 647         * The following mb guarantees that previous clear of a PENDING bit
 648         * will not be reordered with any speculative LOADS or STORES from
 649         * work->current_func, which is executed afterwards.  This possible
 650         * reordering can lead to a missed execution on attempt to qeueue
 651         * the same @work.  E.g. consider this case:
 652         *
 653         *   CPU#0                         CPU#1
 654         *   ----------------------------  --------------------------------
 655         *
 656         * 1  STORE event_indicated
 657         * 2  queue_work_on() {
 658         * 3    test_and_set_bit(PENDING)
 659         * 4 }                             set_..._and_clear_pending() {
 660         * 5                                 set_work_data() # clear bit
 661         * 6                                 smp_mb()
 662         * 7                               work->current_func() {
 663         * 8                                  LOAD event_indicated
 664         *                                 }
 665         *
 666         * Without an explicit full barrier speculative LOAD on line 8 can
 667         * be executed before CPU#0 does STORE on line 1.  If that happens,
 668         * CPU#0 observes the PENDING bit is still set and new execution of
 669         * a @work is not queued in a hope, that CPU#1 will eventually
 670         * finish the queued @work.  Meanwhile CPU#1 does not see
 671         * event_indicated is set, because speculative LOAD was executed
 672         * before actual STORE.
 673         */
 674        smp_mb();
 675}
 676
 677static void clear_work_data(struct work_struct *work)
 678{
 679        smp_wmb();      /* see set_work_pool_and_clear_pending() */
 680        set_work_data(work, WORK_STRUCT_NO_POOL, 0);
 681}
 682
 683static struct pool_workqueue *get_work_pwq(struct work_struct *work)
 684{
 685        unsigned long data = atomic_long_read(&work->data);
 686
 687        if (data & WORK_STRUCT_PWQ)
 688                return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
 689        else
 690                return NULL;
 691}
 692
 693/**
 694 * get_work_pool - return the worker_pool a given work was associated with
 695 * @work: the work item of interest
 696 *
 697 * Pools are created and destroyed under wq_pool_mutex, and allows read
 698 * access under sched-RCU read lock.  As such, this function should be
 699 * called under wq_pool_mutex or with preemption disabled.
 700 *
 701 * All fields of the returned pool are accessible as long as the above
 702 * mentioned locking is in effect.  If the returned pool needs to be used
 703 * beyond the critical section, the caller is responsible for ensuring the
 704 * returned pool is and stays online.
 705 *
 706 * Return: The worker_pool @work was last associated with.  %NULL if none.
 707 */
 708static struct worker_pool *get_work_pool(struct work_struct *work)
 709{
 710        unsigned long data = atomic_long_read(&work->data);
 711        int pool_id;
 712
 713        assert_rcu_or_pool_mutex();
 714
 715        if (data & WORK_STRUCT_PWQ)
 716                return ((struct pool_workqueue *)
 717                        (data & WORK_STRUCT_WQ_DATA_MASK))->pool;
 718
 719        pool_id = data >> WORK_OFFQ_POOL_SHIFT;
 720        if (pool_id == WORK_OFFQ_POOL_NONE)
 721                return NULL;
 722
 723        return idr_find(&worker_pool_idr, pool_id);
 724}
 725
 726/**
 727 * get_work_pool_id - return the worker pool ID a given work is associated with
 728 * @work: the work item of interest
 729 *
 730 * Return: The worker_pool ID @work was last associated with.
 731 * %WORK_OFFQ_POOL_NONE if none.
 732 */
 733static int get_work_pool_id(struct work_struct *work)
 734{
 735        unsigned long data = atomic_long_read(&work->data);
 736
 737        if (data & WORK_STRUCT_PWQ)
 738                return ((struct pool_workqueue *)
 739                        (data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;
 740
 741        return data >> WORK_OFFQ_POOL_SHIFT;
 742}
 743
 744static void mark_work_canceling(struct work_struct *work)
 745{
 746        unsigned long pool_id = get_work_pool_id(work);
 747
 748        pool_id <<= WORK_OFFQ_POOL_SHIFT;
 749        set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
 750}
 751
 752static bool work_is_canceling(struct work_struct *work)
 753{
 754        unsigned long data = atomic_long_read(&work->data);
 755
 756        return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
 757}
 758
 759/*
 760 * Policy functions.  These define the policies on how the global worker
 761 * pools are managed.  Unless noted otherwise, these functions assume that
 762 * they're being called with pool->lock held.
 763 */
 764
 765static bool __need_more_worker(struct worker_pool *pool)
 766{
 767        return !atomic_read(&pool->nr_running);
 768}
 769
 770/*
 771 * Need to wake up a worker?  Called from anything but currently
 772 * running workers.
 773 *
 774 * Note that, because unbound workers never contribute to nr_running, this
 775 * function will always return %true for unbound pools as long as the
 776 * worklist isn't empty.
 777 */
 778static bool need_more_worker(struct worker_pool *pool)
 779{
 780        return !list_empty(&pool->worklist) && __need_more_worker(pool);
 781}
 782
 783/* Can I start working?  Called from busy but !running workers. */
 784static bool may_start_working(struct worker_pool *pool)
 785{
 786        return pool->nr_idle;
 787}
 788
 789/* Do I need to keep working?  Called from currently running workers. */
 790static bool keep_working(struct worker_pool *pool)
 791{
 792        return !list_empty(&pool->worklist) &&
 793                atomic_read(&pool->nr_running) <= 1;
 794}
 795
 796/* Do we need a new worker?  Called from manager. */
 797static bool need_to_create_worker(struct worker_pool *pool)
 798{
 799        return need_more_worker(pool) && !may_start_working(pool);
 800}
 801
 802/* Do we have too many workers and should some go away? */
 803static bool too_many_workers(struct worker_pool *pool)
 804{
 805        bool managing = pool->flags & POOL_MANAGER_ACTIVE;
 806        int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
 807        int nr_busy = pool->nr_workers - nr_idle;
 808
 809        return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
 810}
 811
 812/*
 813 * Wake up functions.
 814 */
 815
 816/* Return the first idle worker.  Safe with preemption disabled */
 817static struct worker *first_idle_worker(struct worker_pool *pool)
 818{
 819        if (unlikely(list_empty(&pool->idle_list)))
 820                return NULL;
 821
 822        return list_first_entry(&pool->idle_list, struct worker, entry);
 823}
 824
 825/**
 826 * wake_up_worker - wake up an idle worker
 827 * @pool: worker pool to wake worker from
 828 *
 829 * Wake up the first idle worker of @pool.
 830 *
 831 * CONTEXT:
 832 * spin_lock_irq(pool->lock).
 833 */
 834static void wake_up_worker(struct worker_pool *pool)
 835{
 836        struct worker *worker = first_idle_worker(pool);
 837
 838        if (likely(worker))
 839                wake_up_process(worker->task);
 840}
 841
 842/**
 843 * wq_worker_waking_up - a worker is waking up
 844 * @task: task waking up
 845 * @cpu: CPU @task is waking up to
 846 *
 847 * This function is called during try_to_wake_up() when a worker is
 848 * being awoken.
 849 *
 850 * CONTEXT:
 851 * spin_lock_irq(rq->lock)
 852 */
 853void wq_worker_waking_up(struct task_struct *task, int cpu)
 854{
 855        struct worker *worker = kthread_data(task);
 856
 857        if (!(worker->flags & WORKER_NOT_RUNNING)) {
 858                WARN_ON_ONCE(worker->pool->cpu != cpu);
 859                atomic_inc(&worker->pool->nr_running);
 860        }
 861}
 862
 863/**
 864 * wq_worker_sleeping - a worker is going to sleep
 865 * @task: task going to sleep
 866 *
 867 * This function is called during schedule() when a busy worker is
 868 * going to sleep.  Worker on the same cpu can be woken up by
 869 * returning pointer to its task.
 870 *
 871 * CONTEXT:
 872 * spin_lock_irq(rq->lock)
 873 *
 874 * Return:
 875 * Worker task on @cpu to wake up, %NULL if none.
 876 */
 877struct task_struct *wq_worker_sleeping(struct task_struct *task)
 878{
 879        struct worker *worker = kthread_data(task), *to_wakeup = NULL;
 880        struct worker_pool *pool;
 881
 882        /*
 883         * Rescuers, which may not have all the fields set up like normal
 884         * workers, also reach here, let's not access anything before
 885         * checking NOT_RUNNING.
 886         */
 887        if (worker->flags & WORKER_NOT_RUNNING)
 888                return NULL;
 889
 890        pool = worker->pool;
 891
 892        /* this can only happen on the local cpu */
 893        if (WARN_ON_ONCE(pool->cpu != raw_smp_processor_id()))
 894                return NULL;
 895
 896        /*
 897         * The counterpart of the following dec_and_test, implied mb,
 898         * worklist not empty test sequence is in insert_work().
 899         * Please read comment there.
 900         *
 901         * NOT_RUNNING is clear.  This means that we're bound to and
 902         * running on the local cpu w/ rq lock held and preemption
 903         * disabled, which in turn means that none else could be
 904         * manipulating idle_list, so dereferencing idle_list without pool
 905         * lock is safe.
 906         */
 907        if (atomic_dec_and_test(&pool->nr_running) &&
 908            !list_empty(&pool->worklist))
 909                to_wakeup = first_idle_worker(pool);
 910        return to_wakeup ? to_wakeup->task : NULL;
 911}
 912
 913/**
 914 * worker_set_flags - set worker flags and adjust nr_running accordingly
 915 * @worker: self
 916 * @flags: flags to set
 917 *
 918 * Set @flags in @worker->flags and adjust nr_running accordingly.
 919 *
 920 * CONTEXT:
 921 * spin_lock_irq(pool->lock)
 922 */
 923static inline void worker_set_flags(struct worker *worker, unsigned int flags)
 924{
 925        struct worker_pool *pool = worker->pool;
 926
 927        WARN_ON_ONCE(worker->task != current);
 928
 929        /* If transitioning into NOT_RUNNING, adjust nr_running. */
 930        if ((flags & WORKER_NOT_RUNNING) &&
 931            !(worker->flags & WORKER_NOT_RUNNING)) {
 932                atomic_dec(&pool->nr_running);
 933        }
 934
 935        worker->flags |= flags;
 936}
 937
 938/**
 939 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
 940 * @worker: self
 941 * @flags: flags to clear
 942 *
 943 * Clear @flags in @worker->flags and adjust nr_running accordingly.
 944 *
 945 * CONTEXT:
 946 * spin_lock_irq(pool->lock)
 947 */
 948static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
 949{
 950        struct worker_pool *pool = worker->pool;
 951        unsigned int oflags = worker->flags;
 952
 953        WARN_ON_ONCE(worker->task != current);
 954
 955        worker->flags &= ~flags;
 956
 957        /*
 958         * If transitioning out of NOT_RUNNING, increment nr_running.  Note
 959         * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is mask
 960         * of multiple flags, not a single flag.
 961         */
 962        if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
 963                if (!(worker->flags & WORKER_NOT_RUNNING))
 964                        atomic_inc(&pool->nr_running);
 965}
 966
 967/**
 968 * find_worker_executing_work - find worker which is executing a work
 969 * @pool: pool of interest
 970 * @work: work to find worker for
 971 *
 972 * Find a worker which is executing @work on @pool by searching
 973 * @pool->busy_hash which is keyed by the address of @work.  For a worker
 974 * to match, its current execution should match the address of @work and
 975 * its work function.  This is to avoid unwanted dependency between
 976 * unrelated work executions through a work item being recycled while still
 977 * being executed.
 978 *
 979 * This is a bit tricky.  A work item may be freed once its execution
 980 * starts and nothing prevents the freed area from being recycled for
 981 * another work item.  If the same work item address ends up being reused
 982 * before the original execution finishes, workqueue will identify the
 983 * recycled work item as currently executing and make it wait until the
 984 * current execution finishes, introducing an unwanted dependency.
 985 *
 986 * This function checks the work item address and work function to avoid
 987 * false positives.  Note that this isn't complete as one may construct a
 988 * work function which can introduce dependency onto itself through a
 989 * recycled work item.  Well, if somebody wants to shoot oneself in the
 990 * foot that badly, there's only so much we can do, and if such deadlock
 991 * actually occurs, it should be easy to locate the culprit work function.
 992 *
 993 * CONTEXT:
 994 * spin_lock_irq(pool->lock).
 995 *
 996 * Return:
 997 * Pointer to worker which is executing @work if found, %NULL
 998 * otherwise.
 999 */
1000static struct worker *find_worker_executing_work(struct worker_pool *pool,
1001                                                 struct work_struct *work)
1002{
1003        struct worker *worker;
1004
1005        hash_for_each_possible(pool->busy_hash, worker, hentry,
1006                               (unsigned long)work)
1007                if (worker->current_work == work &&
1008                    worker->current_func == work->func)
1009                        return worker;
1010
1011        return NULL;
1012}
1013
1014/**
1015 * move_linked_works - move linked works to a list
1016 * @work: start of series of works to be scheduled
1017 * @head: target list to append @work to
1018 * @nextp: out parameter for nested worklist walking
1019 *
1020 * Schedule linked works starting from @work to @head.  Work series to
1021 * be scheduled starts at @work and includes any consecutive work with
1022 * WORK_STRUCT_LINKED set in its predecessor.
1023 *
1024 * If @nextp is not NULL, it's updated to point to the next work of
1025 * the last scheduled work.  This allows move_linked_works() to be
1026 * nested inside outer list_for_each_entry_safe().
1027 *
1028 * CONTEXT:
1029 * spin_lock_irq(pool->lock).
1030 */
1031static void move_linked_works(struct work_struct *work, struct list_head *head,
1032                              struct work_struct **nextp)
1033{
1034        struct work_struct *n;
1035
1036        /*
1037         * Linked worklist will always end before the end of the list,
1038         * use NULL for list head.
1039         */
1040        list_for_each_entry_safe_from(work, n, NULL, entry) {
1041                list_move_tail(&work->entry, head);
1042                if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1043                        break;
1044        }
1045
1046        /*
1047         * If we're already inside safe list traversal and have moved
1048         * multiple works to the scheduled queue, the next position
1049         * needs to be updated.
1050         */
1051        if (nextp)
1052                *nextp = n;
1053}
1054
1055/**
1056 * get_pwq - get an extra reference on the specified pool_workqueue
1057 * @pwq: pool_workqueue to get
1058 *
1059 * Obtain an extra reference on @pwq.  The caller should guarantee that
1060 * @pwq has positive refcnt and be holding the matching pool->lock.
1061 */
1062static void get_pwq(struct pool_workqueue *pwq)
1063{
1064        lockdep_assert_held(&pwq->pool->lock);
1065        WARN_ON_ONCE(pwq->refcnt <= 0);
1066        pwq->refcnt++;
1067}
1068
1069/**
1070 * put_pwq - put a pool_workqueue reference
1071 * @pwq: pool_workqueue to put
1072 *
1073 * Drop a reference of @pwq.  If its refcnt reaches zero, schedule its
1074 * destruction.  The caller should be holding the matching pool->lock.
1075 */
1076static void put_pwq(struct pool_workqueue *pwq)
1077{
1078        lockdep_assert_held(&pwq->pool->lock);
1079        if (likely(--pwq->refcnt))
1080                return;
1081        if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND)))
1082                return;
1083        /*
1084         * @pwq can't be released under pool->lock, bounce to
1085         * pwq_unbound_release_workfn().  This never recurses on the same
1086         * pool->lock as this path is taken only for unbound workqueues and
1087         * the release work item is scheduled on a per-cpu workqueue.  To
1088         * avoid lockdep warning, unbound pool->locks are given lockdep
1089         * subclass of 1 in get_unbound_pool().
1090         */
1091        schedule_work(&pwq->unbound_release_work);
1092}
1093
1094/**
1095 * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
1096 * @pwq: pool_workqueue to put (can be %NULL)
1097 *
1098 * put_pwq() with locking.  This function also allows %NULL @pwq.
1099 */
1100static void put_pwq_unlocked(struct pool_workqueue *pwq)
1101{
1102        if (pwq) {
1103                /*
1104                 * As both pwqs and pools are sched-RCU protected, the
1105                 * following lock operations are safe.
1106                 */
1107                spin_lock_irq(&pwq->pool->lock);
1108                put_pwq(pwq);
1109                spin_unlock_irq(&pwq->pool->lock);
1110        }
1111}
1112
1113static void pwq_activate_delayed_work(struct work_struct *work)
1114{
1115        struct pool_workqueue *pwq = get_work_pwq(work);
1116
1117        trace_workqueue_activate_work(work);
1118        if (list_empty(&pwq->pool->worklist))
1119                pwq->pool->watchdog_ts = jiffies;
1120        move_linked_works(work, &pwq->pool->worklist, NULL);
1121        __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
1122        pwq->nr_active++;
1123}
1124
1125static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
1126{
1127        struct work_struct *work = list_first_entry(&pwq->delayed_works,
1128                                                    struct work_struct, entry);
1129
1130        pwq_activate_delayed_work(work);
1131}
1132
1133/**
1134 * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
1135 * @pwq: pwq of interest
1136 * @color: color of work which left the queue
1137 *
1138 * A work either has completed or is removed from pending queue,
1139 * decrement nr_in_flight of its pwq and handle workqueue flushing.
1140 *
1141 * CONTEXT:
1142 * spin_lock_irq(pool->lock).
1143 */
1144static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
1145{
1146        /* uncolored work items don't participate in flushing or nr_active */
1147        if (color == WORK_NO_COLOR)
1148                goto out_put;
1149
1150        pwq->nr_in_flight[color]--;
1151
1152        pwq->nr_active--;
1153        if (!list_empty(&pwq->delayed_works)) {
1154                /* one down, submit a delayed one */
1155                if (pwq->nr_active < pwq->max_active)
1156                        pwq_activate_first_delayed(pwq);
1157        }
1158
1159        /* is flush in progress and are we at the flushing tip? */
1160        if (likely(pwq->flush_color != color))
1161                goto out_put;
1162
1163        /* are there still in-flight works? */
1164        if (pwq->nr_in_flight[color])
1165                goto out_put;
1166
1167        /* this pwq is done, clear flush_color */
1168        pwq->flush_color = -1;
1169
1170        /*
1171         * If this was the last pwq, wake up the first flusher.  It
1172         * will handle the rest.
1173         */
1174        if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
1175                complete(&pwq->wq->first_flusher->done);
1176out_put:
1177        put_pwq(pwq);
1178}
1179
1180/**
1181 * try_to_grab_pending - steal work item from worklist and disable irq
1182 * @work: work item to steal
1183 * @is_dwork: @work is a delayed_work
1184 * @flags: place to store irq state
1185 *
1186 * Try to grab PENDING bit of @work.  This function can handle @work in any
1187 * stable state - idle, on timer or on worklist.
1188 *
1189 * Return:
1190 *  1           if @work was pending and we successfully stole PENDING
1191 *  0           if @work was idle and we claimed PENDING
1192 *  -EAGAIN     if PENDING couldn't be grabbed at the moment, safe to busy-retry
1193 *  -ENOENT     if someone else is canceling @work, this state may persist
1194 *              for arbitrarily long
1195 *
1196 * Note:
1197 * On >= 0 return, the caller owns @work's PENDING bit.  To avoid getting
1198 * interrupted while holding PENDING and @work off queue, irq must be
1199 * disabled on entry.  This, combined with delayed_work->timer being
1200 * irqsafe, ensures that we return -EAGAIN for finite short period of time.
1201 *
1202 * On successful return, >= 0, irq is disabled and the caller is
1203 * responsible for releasing it using local_irq_restore(*@flags).
1204 *
1205 * This function is safe to call from any context including IRQ handler.
1206 */
1207static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
1208                               unsigned long *flags)
1209{
1210        struct worker_pool *pool;
1211        struct pool_workqueue *pwq;
1212
1213        local_irq_save(*flags);
1214
1215        /* try to steal the timer if it exists */
1216        if (is_dwork) {
1217                struct delayed_work *dwork = to_delayed_work(work);
1218
1219                /*
1220                 * dwork->timer is irqsafe.  If del_timer() fails, it's
1221                 * guaranteed that the timer is not queued anywhere and not
1222                 * running on the local CPU.
1223                 */
1224                if (likely(del_timer(&dwork->timer)))
1225                        return 1;
1226        }
1227
1228        /* try to claim PENDING the normal way */
1229        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
1230                return 0;
1231
1232        /*
1233         * The queueing is in progress, or it is already queued. Try to
1234         * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
1235         */
1236        pool = get_work_pool(work);
1237        if (!pool)
1238                goto fail;
1239
1240        spin_lock(&pool->lock);
1241        /*
1242         * work->data is guaranteed to point to pwq only while the work
1243         * item is queued on pwq->wq, and both updating work->data to point
1244         * to pwq on queueing and to pool on dequeueing are done under
1245         * pwq->pool->lock.  This in turn guarantees that, if work->data
1246         * points to pwq which is associated with a locked pool, the work
1247         * item is currently queued on that pool.
1248         */
1249        pwq = get_work_pwq(work);
1250        if (pwq && pwq->pool == pool) {
1251                debug_work_deactivate(work);
1252
1253                /*
1254                 * A delayed work item cannot be grabbed directly because
1255                 * it might have linked NO_COLOR work items which, if left
1256                 * on the delayed_list, will confuse pwq->nr_active
1257                 * management later on and cause stall.  Make sure the work
1258                 * item is activated before grabbing.
1259                 */
1260                if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
1261                        pwq_activate_delayed_work(work);
1262
1263                list_del_init(&work->entry);
1264                pwq_dec_nr_in_flight(pwq, get_work_color(work));
1265
1266                /* work->data points to pwq iff queued, point to pool */
1267                set_work_pool_and_keep_pending(work, pool->id);
1268
1269                spin_unlock(&pool->lock);
1270                return 1;
1271        }
1272        spin_unlock(&pool->lock);
1273fail:
1274        local_irq_restore(*flags);
1275        if (work_is_canceling(work))
1276                return -ENOENT;
1277        cpu_relax();
1278        return -EAGAIN;
1279}
1280
1281/**
1282 * insert_work - insert a work into a pool
1283 * @pwq: pwq @work belongs to
1284 * @work: work to insert
1285 * @head: insertion point
1286 * @extra_flags: extra WORK_STRUCT_* flags to set
1287 *
1288 * Insert @work which belongs to @pwq after @head.  @extra_flags is or'd to
1289 * work_struct flags.
1290 *
1291 * CONTEXT:
1292 * spin_lock_irq(pool->lock).
1293 */
1294static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
1295                        struct list_head *head, unsigned int extra_flags)
1296{
1297        struct worker_pool *pool = pwq->pool;
1298
1299        /* we own @work, set data and link */
1300        set_work_pwq(work, pwq, extra_flags);
1301        list_add_tail(&work->entry, head);
1302        get_pwq(pwq);
1303
1304        /*
1305         * Ensure either wq_worker_sleeping() sees the above
1306         * list_add_tail() or we see zero nr_running to avoid workers lying
1307         * around lazily while there are works to be processed.
1308         */
1309        smp_mb();
1310
1311        if (__need_more_worker(pool))
1312                wake_up_worker(pool);
1313}
1314
1315/*
1316 * Test whether @work is being queued from another work executing on the
1317 * same workqueue.
1318 */
1319static bool is_chained_work(struct workqueue_struct *wq)
1320{
1321        struct worker *worker;
1322
1323        worker = current_wq_worker();
1324        /*
1325         * Return %true iff I'm a worker execuing a work item on @wq.  If
1326         * I'm @worker, it's safe to dereference it without locking.
1327         */
1328        return worker && worker->current_pwq->wq == wq;
1329}
1330
1331/*
1332 * When queueing an unbound work item to a wq, prefer local CPU if allowed
1333 * by wq_unbound_cpumask.  Otherwise, round robin among the allowed ones to
1334 * avoid perturbing sensitive tasks.
1335 */
1336static int wq_select_unbound_cpu(int cpu)
1337{
1338        static bool printed_dbg_warning;
1339        int new_cpu;
1340
1341        if (likely(!wq_debug_force_rr_cpu)) {
1342                if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
1343                        return cpu;
1344        } else if (!printed_dbg_warning) {
1345                pr_warn("workqueue: round-robin CPU selection forced, expect performance impact\n");
1346                printed_dbg_warning = true;
1347        }
1348
1349        if (cpumask_empty(wq_unbound_cpumask))
1350                return cpu;
1351
1352        new_cpu = __this_cpu_read(wq_rr_cpu_last);
1353        new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
1354        if (unlikely(new_cpu >= nr_cpu_ids)) {
1355                new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
1356                if (unlikely(new_cpu >= nr_cpu_ids))
1357                        return cpu;
1358        }
1359        __this_cpu_write(wq_rr_cpu_last, new_cpu);
1360
1361        return new_cpu;
1362}
1363
1364static void __queue_work(int cpu, struct workqueue_struct *wq,
1365                         struct work_struct *work)
1366{
1367        struct pool_workqueue *pwq;
1368        struct worker_pool *last_pool;
1369        struct list_head *worklist;
1370        unsigned int work_flags;
1371        unsigned int req_cpu = cpu;
1372
1373        /*
1374         * While a work item is PENDING && off queue, a task trying to
1375         * steal the PENDING will busy-loop waiting for it to either get
1376         * queued or lose PENDING.  Grabbing PENDING and queueing should
1377         * happen with IRQ disabled.
1378         */
1379        WARN_ON_ONCE(!irqs_disabled());
1380
1381        debug_work_activate(work);
1382
1383        /* if draining, only works from the same workqueue are allowed */
1384        if (unlikely(wq->flags & __WQ_DRAINING) &&
1385            WARN_ON_ONCE(!is_chained_work(wq)))
1386                return;
1387retry:
1388        if (req_cpu == WORK_CPU_UNBOUND)
1389                cpu = wq_select_unbound_cpu(raw_smp_processor_id());
1390
1391        /* pwq which will be used unless @work is executing elsewhere */
1392        if (!(wq->flags & WQ_UNBOUND))
1393                pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
1394        else
1395                pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
1396
1397        /*
1398         * If @work was previously on a different pool, it might still be
1399         * running there, in which case the work needs to be queued on that
1400         * pool to guarantee non-reentrancy.
1401         */
1402        last_pool = get_work_pool(work);
1403        if (last_pool && last_pool != pwq->pool) {
1404                struct worker *worker;
1405
1406                spin_lock(&last_pool->lock);
1407
1408                worker = find_worker_executing_work(last_pool, work);
1409
1410                if (worker && worker->current_pwq->wq == wq) {
1411                        pwq = worker->current_pwq;
1412                } else {
1413                        /* meh... not running there, queue here */
1414                        spin_unlock(&last_pool->lock);
1415                        spin_lock(&pwq->pool->lock);
1416                }
1417        } else {
1418                spin_lock(&pwq->pool->lock);
1419        }
1420
1421        /*
1422         * pwq is determined and locked.  For unbound pools, we could have
1423         * raced with pwq release and it could already be dead.  If its
1424         * refcnt is zero, repeat pwq selection.  Note that pwqs never die
1425         * without another pwq replacing it in the numa_pwq_tbl or while
1426         * work items are executing on it, so the retrying is guaranteed to
1427         * make forward-progress.
1428         */
1429        if (unlikely(!pwq->refcnt)) {
1430                if (wq->flags & WQ_UNBOUND) {
1431                        spin_unlock(&pwq->pool->lock);
1432                        cpu_relax();
1433                        goto retry;
1434                }
1435                /* oops */
1436                WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
1437                          wq->name, cpu);
1438        }
1439
1440        /* pwq determined, queue */
1441        trace_workqueue_queue_work(req_cpu, pwq, work);
1442
1443        if (WARN_ON(!list_empty(&work->entry))) {
1444                spin_unlock(&pwq->pool->lock);
1445                return;
1446        }
1447
1448        pwq->nr_in_flight[pwq->work_color]++;
1449        work_flags = work_color_to_flags(pwq->work_color);
1450
1451        if (likely(pwq->nr_active < pwq->max_active)) {
1452                trace_workqueue_activate_work(work);
1453                pwq->nr_active++;
1454                worklist = &pwq->pool->worklist;
1455                if (list_empty(worklist))
1456                        pwq->pool->watchdog_ts = jiffies;
1457        } else {
1458                work_flags |= WORK_STRUCT_DELAYED;
1459                worklist = &pwq->delayed_works;
1460        }
1461
1462        insert_work(pwq, work, worklist, work_flags);
1463
1464        spin_unlock(&pwq->pool->lock);
1465}
1466
1467/**
1468 * queue_work_on - queue work on specific cpu
1469 * @cpu: CPU number to execute work on
1470 * @wq: workqueue to use
1471 * @work: work to queue
1472 *
1473 * We queue the work to a specific CPU, the caller must ensure it
1474 * can't go away.
1475 *
1476 * Return: %false if @work was already on a queue, %true otherwise.
1477 */
1478bool queue_work_on(int cpu, struct workqueue_struct *wq,
1479                   struct work_struct *work)
1480{
1481        bool ret = false;
1482        unsigned long flags;
1483
1484        local_irq_save(flags);
1485
1486        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1487                __queue_work(cpu, wq, work);
1488                ret = true;
1489        }
1490
1491        local_irq_restore(flags);
1492        return ret;
1493}
1494EXPORT_SYMBOL(queue_work_on);
1495
1496void delayed_work_timer_fn(unsigned long __data)
1497{
1498        struct delayed_work *dwork = (struct delayed_work *)__data;
1499
1500        /* should have been called from irqsafe timer with irq already off */
1501        __queue_work(dwork->cpu, dwork->wq, &dwork->work);
1502}
1503EXPORT_SYMBOL(delayed_work_timer_fn);
1504
1505static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
1506                                struct delayed_work *dwork, unsigned long delay)
1507{
1508        struct timer_list *timer = &dwork->timer;
1509        struct work_struct *work = &dwork->work;
1510
1511        WARN_ON_ONCE(!wq);
1512        WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
1513                     timer->data != (unsigned long)dwork);
1514        WARN_ON_ONCE(timer_pending(timer));
1515        WARN_ON_ONCE(!list_empty(&work->entry));
1516
1517        /*
1518         * If @delay is 0, queue @dwork->work immediately.  This is for
1519         * both optimization and correctness.  The earliest @timer can
1520         * expire is on the closest next tick and delayed_work users depend
1521         * on that there's no such delay when @delay is 0.
1522         */
1523        if (!delay) {
1524                __queue_work(cpu, wq, &dwork->work);
1525                return;
1526        }
1527
1528        dwork->wq = wq;
1529        dwork->cpu = cpu;
1530        timer->expires = jiffies + delay;
1531
1532        if (unlikely(cpu != WORK_CPU_UNBOUND))
1533                add_timer_on(timer, cpu);
1534        else
1535                add_timer(timer);
1536}
1537
1538/**
1539 * queue_delayed_work_on - queue work on specific CPU after delay
1540 * @cpu: CPU number to execute work on
1541 * @wq: workqueue to use
1542 * @dwork: work to queue
1543 * @delay: number of jiffies to wait before queueing
1544 *
1545 * Return: %false if @work was already on a queue, %true otherwise.  If
1546 * @delay is zero and @dwork is idle, it will be scheduled for immediate
1547 * execution.
1548 */
1549bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
1550                           struct delayed_work *dwork, unsigned long delay)
1551{
1552        struct work_struct *work = &dwork->work;
1553        bool ret = false;
1554        unsigned long flags;
1555
1556        /* read the comment in __queue_work() */
1557        local_irq_save(flags);
1558
1559        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1560                __queue_delayed_work(cpu, wq, dwork, delay);
1561                ret = true;
1562        }
1563
1564        local_irq_restore(flags);
1565        return ret;
1566}
1567EXPORT_SYMBOL(queue_delayed_work_on);
1568
1569/**
1570 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
1571 * @cpu: CPU number to execute work on
1572 * @wq: workqueue to use
1573 * @dwork: work to queue
1574 * @delay: number of jiffies to wait before queueing
1575 *
1576 * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
1577 * modify @dwork's timer so that it expires after @delay.  If @delay is
1578 * zero, @work is guaranteed to be scheduled immediately regardless of its
1579 * current state.
1580 *
1581 * Return: %false if @dwork was idle and queued, %true if @dwork was
1582 * pending and its timer was modified.
1583 *
1584 * This function is safe to call from any context including IRQ handler.
1585 * See try_to_grab_pending() for details.
1586 */
1587bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
1588                         struct delayed_work *dwork, unsigned long delay)
1589{
1590        unsigned long flags;
1591        int ret;
1592
1593        do {
1594                ret = try_to_grab_pending(&dwork->work, true, &flags);
1595        } while (unlikely(ret == -EAGAIN));
1596
1597        if (likely(ret >= 0)) {
1598                __queue_delayed_work(cpu, wq, dwork, delay);
1599                local_irq_restore(flags);
1600        }
1601
1602        /* -ENOENT from try_to_grab_pending() becomes %true */
1603        return ret;
1604}
1605EXPORT_SYMBOL_GPL(mod_delayed_work_on);
1606
1607/**
1608 * worker_enter_idle - enter idle state
1609 * @worker: worker which is entering idle state
1610 *
1611 * @worker is entering idle state.  Update stats and idle timer if
1612 * necessary.
1613 *
1614 * LOCKING:
1615 * spin_lock_irq(pool->lock).
1616 */
1617static void worker_enter_idle(struct worker *worker)
1618{
1619        struct worker_pool *pool = worker->pool;
1620
1621        if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
1622            WARN_ON_ONCE(!list_empty(&worker->entry) &&
1623                         (worker->hentry.next || worker->hentry.pprev)))
1624                return;
1625
1626        /* can't use worker_set_flags(), also called from create_worker() */
1627        worker->flags |= WORKER_IDLE;
1628        pool->nr_idle++;
1629        worker->last_active = jiffies;
1630
1631        /* idle_list is LIFO */
1632        list_add(&worker->entry, &pool->idle_list);
1633
1634        if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
1635                mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
1636
1637        /*
1638         * Sanity check nr_running.  Because wq_unbind_fn() releases
1639         * pool->lock between setting %WORKER_UNBOUND and zapping
1640         * nr_running, the warning may trigger spuriously.  Check iff
1641         * unbind is not in progress.
1642         */
1643        WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
1644                     pool->nr_workers == pool->nr_idle &&
1645                     atomic_read(&pool->nr_running));
1646}
1647
1648/**
1649 * worker_leave_idle - leave idle state
1650 * @worker: worker which is leaving idle state
1651 *
1652 * @worker is leaving idle state.  Update stats.
1653 *
1654 * LOCKING:
1655 * spin_lock_irq(pool->lock).
1656 */
1657static void worker_leave_idle(struct worker *worker)
1658{
1659        struct worker_pool *pool = worker->pool;
1660
1661        if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
1662                return;
1663        worker_clr_flags(worker, WORKER_IDLE);
1664        pool->nr_idle--;
1665        list_del_init(&worker->entry);
1666}
1667
1668static struct worker *alloc_worker(int node)
1669{
1670        struct worker *worker;
1671
1672        worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
1673        if (worker) {
1674                INIT_LIST_HEAD(&worker->entry);
1675                INIT_LIST_HEAD(&worker->scheduled);
1676                INIT_LIST_HEAD(&worker->node);
1677                /* on creation a worker is in !idle && prep state */
1678                worker->flags = WORKER_PREP;
1679        }
1680        return worker;
1681}
1682
1683/**
1684 * worker_attach_to_pool() - attach a worker to a pool
1685 * @worker: worker to be attached
1686 * @pool: the target pool
1687 *
1688 * Attach @worker to @pool.  Once attached, the %WORKER_UNBOUND flag and
1689 * cpu-binding of @worker are kept coordinated with the pool across
1690 * cpu-[un]hotplugs.
1691 */
1692static void worker_attach_to_pool(struct worker *worker,
1693                                   struct worker_pool *pool)
1694{
1695        mutex_lock(&pool->attach_mutex);
1696
1697        /*
1698         * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
1699         * online CPUs.  It'll be re-applied when any of the CPUs come up.
1700         */
1701        set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
1702
1703        /*
1704         * The pool->attach_mutex ensures %POOL_DISASSOCIATED remains
1705         * stable across this function.  See the comments above the
1706         * flag definition for details.
1707         */
1708        if (pool->flags & POOL_DISASSOCIATED)
1709                worker->flags |= WORKER_UNBOUND;
1710
1711        list_add_tail(&worker->node, &pool->workers);
1712
1713        mutex_unlock(&pool->attach_mutex);
1714}
1715
1716/**
1717 * worker_detach_from_pool() - detach a worker from its pool
1718 * @worker: worker which is attached to its pool
1719 * @pool: the pool @worker is attached to
1720 *
1721 * Undo the attaching which had been done in worker_attach_to_pool().  The
1722 * caller worker shouldn't access to the pool after detached except it has
1723 * other reference to the pool.
1724 */
1725static void worker_detach_from_pool(struct worker *worker,
1726                                    struct worker_pool *pool)
1727{
1728        struct completion *detach_completion = NULL;
1729
1730        mutex_lock(&pool->attach_mutex);
1731        list_del(&worker->node);
1732        if (list_empty(&pool->workers))
1733                detach_completion = pool->detach_completion;
1734        mutex_unlock(&pool->attach_mutex);
1735
1736        /* clear leftover flags without pool->lock after it is detached */
1737        worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
1738
1739        if (detach_completion)
1740                complete(detach_completion);
1741}
1742
1743/**
1744 * create_worker - create a new workqueue worker
1745 * @pool: pool the new worker will belong to
1746 *
1747 * Create and start a new worker which is attached to @pool.
1748 *
1749 * CONTEXT:
1750 * Might sleep.  Does GFP_KERNEL allocations.
1751 *
1752 * Return:
1753 * Pointer to the newly created worker.
1754 */
1755static struct worker *create_worker(struct worker_pool *pool)
1756{
1757        struct worker *worker = NULL;
1758        int id = -1;
1759        char id_buf[16];
1760
1761        /* ID is needed to determine kthread name */
1762        id = ida_simple_get(&pool->worker_ida, 0, 0, GFP_KERNEL);
1763        if (id < 0)
1764                goto fail;
1765
1766        worker = alloc_worker(pool->node);
1767        if (!worker)
1768                goto fail;
1769
1770        worker->pool = pool;
1771        worker->id = id;
1772
1773        if (pool->cpu >= 0)
1774                snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
1775                         pool->attrs->nice < 0  ? "H" : "");
1776        else
1777                snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
1778
1779        worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
1780                                              "kworker/%s", id_buf);
1781        if (IS_ERR(worker->task))
1782                goto fail;
1783
1784        set_user_nice(worker->task, pool->attrs->nice);
1785        kthread_bind_mask(worker->task, pool->attrs->cpumask);
1786
1787        /* successful, attach the worker to the pool */
1788        worker_attach_to_pool(worker, pool);
1789
1790        /* start the newly created worker */
1791        spin_lock_irq(&pool->lock);
1792        worker->pool->nr_workers++;
1793        worker_enter_idle(worker);
1794        wake_up_process(worker->task);
1795        spin_unlock_irq(&pool->lock);
1796
1797        return worker;
1798
1799fail:
1800        if (id >= 0)
1801                ida_simple_remove(&pool->worker_ida, id);
1802        kfree(worker);
1803        return NULL;
1804}
1805
1806/**
1807 * destroy_worker - destroy a workqueue worker
1808 * @worker: worker to be destroyed
1809 *
1810 * Destroy @worker and adjust @pool stats accordingly.  The worker should
1811 * be idle.
1812 *
1813 * CONTEXT:
1814 * spin_lock_irq(pool->lock).
1815 */
1816static void destroy_worker(struct worker *worker)
1817{
1818        struct worker_pool *pool = worker->pool;
1819
1820        lockdep_assert_held(&pool->lock);
1821
1822        /* sanity check frenzy */
1823        if (WARN_ON(worker->current_work) ||
1824            WARN_ON(!list_empty(&worker->scheduled)) ||
1825            WARN_ON(!(worker->flags & WORKER_IDLE)))
1826                return;
1827
1828        pool->nr_workers--;
1829        pool->nr_idle--;
1830
1831        list_del_init(&worker->entry);
1832        worker->flags |= WORKER_DIE;
1833        wake_up_process(worker->task);
1834}
1835
1836static void idle_worker_timeout(unsigned long __pool)
1837{
1838        struct worker_pool *pool = (void *)__pool;
1839
1840        spin_lock_irq(&pool->lock);
1841
1842        while (too_many_workers(pool)) {
1843                struct worker *worker;
1844                unsigned long expires;
1845
1846                /* idle_list is kept in LIFO order, check the last one */
1847                worker = list_entry(pool->idle_list.prev, struct worker, entry);
1848                expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1849
1850                if (time_before(jiffies, expires)) {
1851                        mod_timer(&pool->idle_timer, expires);
1852                        break;
1853                }
1854
1855                destroy_worker(worker);
1856        }
1857
1858        spin_unlock_irq(&pool->lock);
1859}
1860
1861static void send_mayday(struct work_struct *work)
1862{
1863        struct pool_workqueue *pwq = get_work_pwq(work);
1864        struct workqueue_struct *wq = pwq->wq;
1865
1866        lockdep_assert_held(&wq_mayday_lock);
1867
1868        if (!wq->rescuer)
1869                return;
1870
1871        /* mayday mayday mayday */
1872        if (list_empty(&pwq->mayday_node)) {
1873                /*
1874                 * If @pwq is for an unbound wq, its base ref may be put at
1875                 * any time due to an attribute change.  Pin @pwq until the
1876                 * rescuer is done with it.
1877                 */
1878                get_pwq(pwq);
1879                list_add_tail(&pwq->mayday_node, &wq->maydays);
1880                wake_up_process(wq->rescuer->task);
1881        }
1882}
1883
1884static void pool_mayday_timeout(unsigned long __pool)
1885{
1886        struct worker_pool *pool = (void *)__pool;
1887        struct work_struct *work;
1888
1889        spin_lock_irq(&pool->lock);
1890        spin_lock(&wq_mayday_lock);             /* for wq->maydays */
1891
1892        if (need_to_create_worker(pool)) {
1893                /*
1894                 * We've been trying to create a new worker but
1895                 * haven't been successful.  We might be hitting an
1896                 * allocation deadlock.  Send distress signals to
1897                 * rescuers.
1898                 */
1899                list_for_each_entry(work, &pool->worklist, entry)
1900                        send_mayday(work);
1901        }
1902
1903        spin_unlock(&wq_mayday_lock);
1904        spin_unlock_irq(&pool->lock);
1905
1906        mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
1907}
1908
1909/**
1910 * maybe_create_worker - create a new worker if necessary
1911 * @pool: pool to create a new worker for
1912 *
1913 * Create a new worker for @pool if necessary.  @pool is guaranteed to
1914 * have at least one idle worker on return from this function.  If
1915 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
1916 * sent to all rescuers with works scheduled on @pool to resolve
1917 * possible allocation deadlock.
1918 *
1919 * On return, need_to_create_worker() is guaranteed to be %false and
1920 * may_start_working() %true.
1921 *
1922 * LOCKING:
1923 * spin_lock_irq(pool->lock) which may be released and regrabbed
1924 * multiple times.  Does GFP_KERNEL allocations.  Called only from
1925 * manager.
1926 */
1927static void maybe_create_worker(struct worker_pool *pool)
1928__releases(&pool->lock)
1929__acquires(&pool->lock)
1930{
1931restart:
1932        spin_unlock_irq(&pool->lock);
1933
1934        /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
1935        mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
1936
1937        while (true) {
1938                if (create_worker(pool) || !need_to_create_worker(pool))
1939                        break;
1940
1941                schedule_timeout_interruptible(CREATE_COOLDOWN);
1942
1943                if (!need_to_create_worker(pool))
1944                        break;
1945        }
1946
1947        del_timer_sync(&pool->mayday_timer);
1948        spin_lock_irq(&pool->lock);
1949        /*
1950         * This is necessary even after a new worker was just successfully
1951         * created as @pool->lock was dropped and the new worker might have
1952         * already become busy.
1953         */
1954        if (need_to_create_worker(pool))
1955                goto restart;
1956}
1957
1958/**
1959 * manage_workers - manage worker pool
1960 * @worker: self
1961 *
1962 * Assume the manager role and manage the worker pool @worker belongs
1963 * to.  At any given time, there can be only zero or one manager per
1964 * pool.  The exclusion is handled automatically by this function.
1965 *
1966 * The caller can safely start processing works on false return.  On
1967 * true return, it's guaranteed that need_to_create_worker() is false
1968 * and may_start_working() is true.
1969 *
1970 * CONTEXT:
1971 * spin_lock_irq(pool->lock) which may be released and regrabbed
1972 * multiple times.  Does GFP_KERNEL allocations.
1973 *
1974 * Return:
1975 * %false if the pool doesn't need management and the caller can safely
1976 * start processing works, %true if management function was performed and
1977 * the conditions that the caller verified before calling the function may
1978 * no longer be true.
1979 */
1980static bool manage_workers(struct worker *worker)
1981{
1982        struct worker_pool *pool = worker->pool;
1983
1984        if (pool->flags & POOL_MANAGER_ACTIVE)
1985                return false;
1986
1987        pool->flags |= POOL_MANAGER_ACTIVE;
1988        pool->manager = worker;
1989
1990        maybe_create_worker(pool);
1991
1992        pool->manager = NULL;
1993        pool->flags &= ~POOL_MANAGER_ACTIVE;
1994        wake_up(&wq_manager_wait);
1995        return true;
1996}
1997
1998/**
1999 * process_one_work - process single work
2000 * @worker: self
2001 * @work: work to process
2002 *
2003 * Process @work.  This function contains all the logics necessary to
2004 * process a single work including synchronization against and
2005 * interaction with other workers on the same cpu, queueing and
2006 * flushing.  As long as context requirement is met, any worker can
2007 * call this function to process a work.
2008 *
2009 * CONTEXT:
2010 * spin_lock_irq(pool->lock) which is released and regrabbed.
2011 */
2012static void process_one_work(struct worker *worker, struct work_struct *work)
2013__releases(&pool->lock)
2014__acquires(&pool->lock)
2015{
2016        struct pool_workqueue *pwq = get_work_pwq(work);
2017        struct worker_pool *pool = worker->pool;
2018        bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
2019        int work_color;
2020        struct worker *collision;
2021#ifdef CONFIG_LOCKDEP
2022        /*
2023         * It is permissible to free the struct work_struct from
2024         * inside the function that is called from it, this we need to
2025         * take into account for lockdep too.  To avoid bogus "held
2026         * lock freed" warnings as well as problems when looking into
2027         * work->lockdep_map, make a copy and use that here.
2028         */
2029        struct lockdep_map lockdep_map;
2030
2031        lockdep_copy_map(&lockdep_map, &work->lockdep_map);
2032#endif
2033        /* ensure we're on the correct CPU */
2034        WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
2035                     raw_smp_processor_id() != pool->cpu);
2036
2037        /*
2038         * A single work shouldn't be executed concurrently by
2039         * multiple workers on a single cpu.  Check whether anyone is
2040         * already processing the work.  If so, defer the work to the
2041         * currently executing one.
2042         */
2043        collision = find_worker_executing_work(pool, work);
2044        if (unlikely(collision)) {
2045                move_linked_works(work, &collision->scheduled, NULL);
2046                return;
2047        }
2048
2049        /* claim and dequeue */
2050        debug_work_deactivate(work);
2051        hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
2052        worker->current_work = work;
2053        worker->current_func = work->func;
2054        worker->current_pwq = pwq;
2055        work_color = get_work_color(work);
2056
2057        list_del_init(&work->entry);
2058
2059        /*
2060         * CPU intensive works don't participate in concurrency management.
2061         * They're the scheduler's responsibility.  This takes @worker out
2062         * of concurrency management and the next code block will chain
2063         * execution of the pending work items.
2064         */
2065        if (unlikely(cpu_intensive))
2066                worker_set_flags(worker, WORKER_CPU_INTENSIVE);
2067
2068        /*
2069         * Wake up another worker if necessary.  The condition is always
2070         * false for normal per-cpu workers since nr_running would always
2071         * be >= 1 at this point.  This is used to chain execution of the
2072         * pending work items for WORKER_NOT_RUNNING workers such as the
2073         * UNBOUND and CPU_INTENSIVE ones.
2074         */
2075        if (need_more_worker(pool))
2076                wake_up_worker(pool);
2077
2078        /*
2079         * Record the last pool and clear PENDING which should be the last
2080         * update to @work.  Also, do this inside @pool->lock so that
2081         * PENDING and queued state changes happen together while IRQ is
2082         * disabled.
2083         */
2084        set_work_pool_and_clear_pending(work, pool->id);
2085
2086        spin_unlock_irq(&pool->lock);
2087
2088        lock_map_acquire(&pwq->wq->lockdep_map);
2089        lock_map_acquire(&lockdep_map);
2090        /*
2091         * Strictly speaking we should mark the invariant state without holding
2092         * any locks, that is, before these two lock_map_acquire()'s.
2093         *
2094         * However, that would result in:
2095         *
2096         *   A(W1)
2097         *   WFC(C)
2098         *              A(W1)
2099         *              C(C)
2100         *
2101         * Which would create W1->C->W1 dependencies, even though there is no
2102         * actual deadlock possible. There are two solutions, using a
2103         * read-recursive acquire on the work(queue) 'locks', but this will then
2104         * hit the lockdep limitation on recursive locks, or simply discard
2105         * these locks.
2106         *
2107         * AFAICT there is no possible deadlock scenario between the
2108         * flush_work() and complete() primitives (except for single-threaded
2109         * workqueues), so hiding them isn't a problem.
2110         */
2111        lockdep_invariant_state(true);
2112        trace_workqueue_execute_start(work);
2113        worker->current_func(work);
2114        /*
2115         * While we must be careful to not use "work" after this, the trace
2116         * point will only record its address.
2117         */
2118        trace_workqueue_execute_end(work);
2119        lock_map_release(&lockdep_map);
2120        lock_map_release(&pwq->wq->lockdep_map);
2121
2122        if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
2123                pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
2124                       "     last function: %pf\n",
2125                       current->comm, preempt_count(), task_pid_nr(current),
2126                       worker->current_func);
2127                debug_show_held_locks(current);
2128                dump_stack();
2129        }
2130
2131        /*
2132         * The following prevents a kworker from hogging CPU on !PREEMPT
2133         * kernels, where a requeueing work item waiting for something to
2134         * happen could deadlock with stop_machine as such work item could
2135         * indefinitely requeue itself while all other CPUs are trapped in
2136         * stop_machine. At the same time, report a quiescent RCU state so
2137         * the same condition doesn't freeze RCU.
2138         */
2139        cond_resched_rcu_qs();
2140
2141        spin_lock_irq(&pool->lock);
2142
2143        /* clear cpu intensive status */
2144        if (unlikely(cpu_intensive))
2145                worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
2146
2147        /* we're done with it, release */
2148        hash_del(&worker->hentry);
2149        worker->current_work = NULL;
2150        worker->current_func = NULL;
2151        worker->current_pwq = NULL;
2152        worker->desc_valid = false;
2153        pwq_dec_nr_in_flight(pwq, work_color);
2154}
2155
2156/**
2157 * process_scheduled_works - process scheduled works
2158 * @worker: self
2159 *
2160 * Process all scheduled works.  Please note that the scheduled list
2161 * may change while processing a work, so this function repeatedly
2162 * fetches a work from the top and executes it.
2163 *
2164 * CONTEXT:
2165 * spin_lock_irq(pool->lock) which may be released and regrabbed
2166 * multiple times.
2167 */
2168static void process_scheduled_works(struct worker *worker)
2169{
2170        while (!list_empty(&worker->scheduled)) {
2171                struct work_struct *work = list_first_entry(&worker->scheduled,
2172                                                struct work_struct, entry);
2173                process_one_work(worker, work);
2174        }
2175}
2176
2177/**
2178 * worker_thread - the worker thread function
2179 * @__worker: self
2180 *
2181 * The worker thread function.  All workers belong to a worker_pool -
2182 * either a per-cpu one or dynamic unbound one.  These workers process all
2183 * work items regardless of their specific target workqueue.  The only
2184 * exception is work items which belong to workqueues with a rescuer which
2185 * will be explained in rescuer_thread().
2186 *
2187 * Return: 0
2188 */
2189static int worker_thread(void *__worker)
2190{
2191        struct worker *worker = __worker;
2192        struct worker_pool *pool = worker->pool;
2193
2194        /* tell the scheduler that this is a workqueue worker */
2195        worker->task->flags |= PF_WQ_WORKER;
2196woke_up:
2197        spin_lock_irq(&pool->lock);
2198
2199        /* am I supposed to die? */
2200        if (unlikely(worker->flags & WORKER_DIE)) {
2201                spin_unlock_irq(&pool->lock);
2202                WARN_ON_ONCE(!list_empty(&worker->entry));
2203                worker->task->flags &= ~PF_WQ_WORKER;
2204
2205                set_task_comm(worker->task, "kworker/dying");
2206                ida_simple_remove(&pool->worker_ida, worker->id);
2207                worker_detach_from_pool(worker, pool);
2208                kfree(worker);
2209                return 0;
2210        }
2211
2212        worker_leave_idle(worker);
2213recheck:
2214        /* no more worker necessary? */
2215        if (!need_more_worker(pool))
2216                goto sleep;
2217
2218        /* do we need to manage? */
2219        if (unlikely(!may_start_working(pool)) && manage_workers(worker))
2220                goto recheck;
2221
2222        /*
2223         * ->scheduled list can only be filled while a worker is
2224         * preparing to process a work or actually processing it.
2225         * Make sure nobody diddled with it while I was sleeping.
2226         */
2227        WARN_ON_ONCE(!list_empty(&worker->scheduled));
2228
2229        /*
2230         * Finish PREP stage.  We're guaranteed to have at least one idle
2231         * worker or that someone else has already assumed the manager
2232         * role.  This is where @worker starts participating in concurrency
2233         * management if applicable and concurrency management is restored
2234         * after being rebound.  See rebind_workers() for details.
2235         */
2236        worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
2237
2238        do {
2239                struct work_struct *work =
2240                        list_first_entry(&pool->worklist,
2241                                         struct work_struct, entry);
2242
2243                pool->watchdog_ts = jiffies;
2244
2245                if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
2246                        /* optimization path, not strictly necessary */
2247                        process_one_work(worker, work);
2248                        if (unlikely(!list_empty(&worker->scheduled)))
2249                                process_scheduled_works(worker);
2250                } else {
2251                        move_linked_works(work, &worker->scheduled, NULL);
2252                        process_scheduled_works(worker);
2253                }
2254        } while (keep_working(pool));
2255
2256        worker_set_flags(worker, WORKER_PREP);
2257sleep:
2258        /*
2259         * pool->lock is held and there's no work to process and no need to
2260         * manage, sleep.  Workers are woken up only while holding
2261         * pool->lock or from local cpu, so setting the current state
2262         * before releasing pool->lock is enough to prevent losing any
2263         * event.
2264         */
2265        worker_enter_idle(worker);
2266        __set_current_state(TASK_IDLE);
2267        spin_unlock_irq(&pool->lock);
2268        schedule();
2269        goto woke_up;
2270}
2271
2272/**
2273 * rescuer_thread - the rescuer thread function
2274 * @__rescuer: self
2275 *
2276 * Workqueue rescuer thread function.  There's one rescuer for each
2277 * workqueue which has WQ_MEM_RECLAIM set.
2278 *
2279 * Regular work processing on a pool may block trying to create a new
2280 * worker which uses GFP_KERNEL allocation which has slight chance of
2281 * developing into deadlock if some works currently on the same queue
2282 * need to be processed to satisfy the GFP_KERNEL allocation.  This is
2283 * the problem rescuer solves.
2284 *
2285 * When such condition is possible, the pool summons rescuers of all
2286 * workqueues which have works queued on the pool and let them process
2287 * those works so that forward progress can be guaranteed.
2288 *
2289 * This should happen rarely.
2290 *
2291 * Return: 0
2292 */
2293static int rescuer_thread(void *__rescuer)
2294{
2295        struct worker *rescuer = __rescuer;
2296        struct workqueue_struct *wq = rescuer->rescue_wq;
2297        struct list_head *scheduled = &rescuer->scheduled;
2298        bool should_stop;
2299
2300        set_user_nice(current, RESCUER_NICE_LEVEL);
2301
2302        /*
2303         * Mark rescuer as worker too.  As WORKER_PREP is never cleared, it
2304         * doesn't participate in concurrency management.
2305         */
2306        rescuer->task->flags |= PF_WQ_WORKER;
2307repeat:
2308        set_current_state(TASK_IDLE);
2309
2310        /*
2311         * By the time the rescuer is requested to stop, the workqueue
2312         * shouldn't have any work pending, but @wq->maydays may still have
2313         * pwq(s) queued.  This can happen by non-rescuer workers consuming
2314         * all the work items before the rescuer got to them.  Go through
2315         * @wq->maydays processing before acting on should_stop so that the
2316         * list is always empty on exit.
2317         */
2318        should_stop = kthread_should_stop();
2319
2320        /* see whether any pwq is asking for help */
2321        spin_lock_irq(&wq_mayday_lock);
2322
2323        while (!list_empty(&wq->maydays)) {
2324                struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
2325                                        struct pool_workqueue, mayday_node);
2326                struct worker_pool *pool = pwq->pool;
2327                struct work_struct *work, *n;
2328                bool first = true;
2329
2330                __set_current_state(TASK_RUNNING);
2331                list_del_init(&pwq->mayday_node);
2332
2333                spin_unlock_irq(&wq_mayday_lock);
2334
2335                worker_attach_to_pool(rescuer, pool);
2336
2337                spin_lock_irq(&pool->lock);
2338                rescuer->pool = pool;
2339
2340                /*
2341                 * Slurp in all works issued via this workqueue and
2342                 * process'em.
2343                 */
2344                WARN_ON_ONCE(!list_empty(scheduled));
2345                list_for_each_entry_safe(work, n, &pool->worklist, entry) {
2346                        if (get_work_pwq(work) == pwq) {
2347                                if (first)
2348                                        pool->watchdog_ts = jiffies;
2349                                move_linked_works(work, scheduled, &n);
2350                        }
2351                        first = false;
2352                }
2353
2354                if (!list_empty(scheduled)) {
2355                        process_scheduled_works(rescuer);
2356
2357                        /*
2358                         * The above execution of rescued work items could
2359                         * have created more to rescue through
2360                         * pwq_activate_first_delayed() or chained
2361                         * queueing.  Let's put @pwq back on mayday list so
2362                         * that such back-to-back work items, which may be
2363                         * being used to relieve memory pressure, don't
2364                         * incur MAYDAY_INTERVAL delay inbetween.
2365                         */
2366                        if (need_to_create_worker(pool)) {
2367                                spin_lock(&wq_mayday_lock);
2368                                get_pwq(pwq);
2369                                list_move_tail(&pwq->mayday_node, &wq->maydays);
2370                                spin_unlock(&wq_mayday_lock);
2371                        }
2372                }
2373
2374                /*
2375                 * Put the reference grabbed by send_mayday().  @pool won't
2376                 * go away while we're still attached to it.
2377                 */
2378                put_pwq(pwq);
2379
2380                /*
2381                 * Leave this pool.  If need_more_worker() is %true, notify a
2382                 * regular worker; otherwise, we end up with 0 concurrency
2383                 * and stalling the execution.
2384                 */
2385                if (need_more_worker(pool))
2386                        wake_up_worker(pool);
2387
2388                rescuer->pool = NULL;
2389                spin_unlock_irq(&pool->lock);
2390
2391                worker_detach_from_pool(rescuer, pool);
2392
2393                spin_lock_irq(&wq_mayday_lock);
2394        }
2395
2396        spin_unlock_irq(&wq_mayday_lock);
2397
2398        if (should_stop) {
2399                __set_current_state(TASK_RUNNING);
2400                rescuer->task->flags &= ~PF_WQ_WORKER;
2401                return 0;
2402        }
2403
2404        /* rescuers should never participate in concurrency management */
2405        WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
2406        schedule();
2407        goto repeat;
2408}
2409
2410/**
2411 * check_flush_dependency - check for flush dependency sanity
2412 * @target_wq: workqueue being flushed
2413 * @target_work: work item being flushed (NULL for workqueue flushes)
2414 *
2415 * %current is trying to flush the whole @target_wq or @target_work on it.
2416 * If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not
2417 * reclaiming memory or running on a workqueue which doesn't have
2418 * %WQ_MEM_RECLAIM as that can break forward-progress guarantee leading to
2419 * a deadlock.
2420 */
2421static void check_flush_dependency(struct workqueue_struct *target_wq,
2422                                   struct work_struct *target_work)
2423{
2424        work_func_t target_func = target_work ? target_work->func : NULL;
2425        struct worker *worker;
2426
2427        if (target_wq->flags & WQ_MEM_RECLAIM)
2428                return;
2429
2430        worker = current_wq_worker();
2431
2432        WARN_ONCE(current->flags & PF_MEMALLOC,
2433                  "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%pf",
2434                  current->pid, current->comm, target_wq->name, target_func);
2435        WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
2436                              (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM),
2437                  "workqueue: WQ_MEM_RECLAIM %s:%pf is flushing !WQ_MEM_RECLAIM %s:%pf",
2438                  worker->current_pwq->wq->name, worker->current_func,
2439                  target_wq->name, target_func);
2440}
2441
2442struct wq_barrier {
2443        struct work_struct      work;
2444        struct completion       done;
2445        struct task_struct      *task;  /* purely informational */
2446};
2447
2448static void wq_barrier_func(struct work_struct *work)
2449{
2450        struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
2451        complete(&barr->done);
2452}
2453
2454/**
2455 * insert_wq_barrier - insert a barrier work
2456 * @pwq: pwq to insert barrier into
2457 * @barr: wq_barrier to insert
2458 * @target: target work to attach @barr to
2459 * @worker: worker currently executing @target, NULL if @target is not executing
2460 *
2461 * @barr is linked to @target such that @barr is completed only after
2462 * @target finishes execution.  Please note that the ordering
2463 * guarantee is observed only with respect to @target and on the local
2464 * cpu.
2465 *
2466 * Currently, a queued barrier can't be canceled.  This is because
2467 * try_to_grab_pending() can't determine whether the work to be
2468 * grabbed is at the head of the queue and thus can't clear LINKED
2469 * flag of the previous work while there must be a valid next work
2470 * after a work with LINKED flag set.
2471 *
2472 * Note that when @worker is non-NULL, @target may be modified
2473 * underneath us, so we can't reliably determine pwq from @target.
2474 *
2475 * CONTEXT:
2476 * spin_lock_irq(pool->lock).
2477 */
2478static void insert_wq_barrier(struct pool_workqueue *pwq,
2479                              struct wq_barrier *barr,
2480                              struct work_struct *target, struct worker *worker)
2481{
2482        struct list_head *head;
2483        unsigned int linked = 0;
2484
2485        /*
2486         * debugobject calls are safe here even with pool->lock locked
2487         * as we know for sure that this will not trigger any of the
2488         * checks and call back into the fixup functions where we
2489         * might deadlock.
2490         */
2491        INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
2492        __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
2493
2494        /*
2495         * Explicitly init the crosslock for wq_barrier::done, make its lock
2496         * key a subkey of the corresponding work. As a result we won't
2497         * build a dependency between wq_barrier::done and unrelated work.
2498         */
2499        lockdep_init_map_crosslock((struct lockdep_map *)&barr->done.map,
2500                                   "(complete)wq_barr::done",
2501                                   target->lockdep_map.key, 1);
2502        __init_completion(&barr->done);
2503        barr->task = current;
2504
2505        /*
2506         * If @target is currently being executed, schedule the
2507         * barrier to the worker; otherwise, put it after @target.
2508         */
2509        if (worker)
2510                head = worker->scheduled.next;
2511        else {
2512                unsigned long *bits = work_data_bits(target);
2513
2514                head = target->entry.next;
2515                /* there can already be other linked works, inherit and set */
2516                linked = *bits & WORK_STRUCT_LINKED;
2517                __set_bit(WORK_STRUCT_LINKED_BIT, bits);
2518        }
2519
2520        debug_work_activate(&barr->work);
2521        insert_work(pwq, &barr->work, head,
2522                    work_color_to_flags(WORK_NO_COLOR) | linked);
2523}
2524
2525/**
2526 * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
2527 * @wq: workqueue being flushed
2528 * @flush_color: new flush color, < 0 for no-op
2529 * @work_color: new work color, < 0 for no-op
2530 *
2531 * Prepare pwqs for workqueue flushing.
2532 *
2533 * If @flush_color is non-negative, flush_color on all pwqs should be
2534 * -1.  If no pwq has in-flight commands at the specified color, all
2535 * pwq->flush_color's stay at -1 and %false is returned.  If any pwq
2536 * has in flight commands, its pwq->flush_color is set to
2537 * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
2538 * wakeup logic is armed and %true is returned.
2539 *
2540 * The caller should have initialized @wq->first_flusher prior to
2541 * calling this function with non-negative @flush_color.  If
2542 * @flush_color is negative, no flush color update is done and %false
2543 * is returned.
2544 *
2545 * If @work_color is non-negative, all pwqs should have the same
2546 * work_color which is previous to @work_color and all will be
2547 * advanced to @work_color.
2548 *
2549 * CONTEXT:
2550 * mutex_lock(wq->mutex).
2551 *
2552 * Return:
2553 * %true if @flush_color >= 0 and there's something to flush.  %false
2554 * otherwise.
2555 */
2556static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
2557                                      int flush_color, int work_color)
2558{
2559        bool wait = false;
2560        struct pool_workqueue *pwq;
2561
2562        if (flush_color >= 0) {
2563                WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
2564                atomic_set(&wq->nr_pwqs_to_flush, 1);
2565        }
2566
2567        for_each_pwq(pwq, wq) {
2568                struct worker_pool *pool = pwq->pool;
2569
2570                spin_lock_irq(&pool->lock);
2571
2572                if (flush_color >= 0) {
2573                        WARN_ON_ONCE(pwq->flush_color != -1);
2574
2575                        if (pwq->nr_in_flight[flush_color]) {
2576                                pwq->flush_color = flush_color;
2577                                atomic_inc(&wq->nr_pwqs_to_flush);
2578                                wait = true;
2579                        }
2580                }
2581
2582                if (work_color >= 0) {
2583                        WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
2584                        pwq->work_color = work_color;
2585                }
2586
2587                spin_unlock_irq(&pool->lock);
2588        }
2589
2590        if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
2591                complete(&wq->first_flusher->done);
2592
2593        return wait;
2594}
2595
2596/**
2597 * flush_workqueue - ensure that any scheduled work has run to completion.
2598 * @wq: workqueue to flush
2599 *
2600 * This function sleeps until all work items which were queued on entry
2601 * have finished execution, but it is not livelocked by new incoming ones.
2602 */
2603void flush_workqueue(struct workqueue_struct *wq)
2604{
2605        struct wq_flusher this_flusher = {
2606                .list = LIST_HEAD_INIT(this_flusher.list),
2607                .flush_color = -1,
2608                .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
2609        };
2610        int next_color;
2611
2612        if (WARN_ON(!wq_online))
2613                return;
2614
2615        lock_map_acquire(&wq->lockdep_map);
2616        lock_map_release(&wq->lockdep_map);
2617
2618        mutex_lock(&wq->mutex);
2619
2620        /*
2621         * Start-to-wait phase
2622         */
2623        next_color = work_next_color(wq->work_color);
2624
2625        if (next_color != wq->flush_color) {
2626                /*
2627                 * Color space is not full.  The current work_color
2628                 * becomes our flush_color and work_color is advanced
2629                 * by one.
2630                 */
2631                WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
2632                this_flusher.flush_color = wq->work_color;
2633                wq->work_color = next_color;
2634
2635                if (!wq->first_flusher) {
2636                        /* no flush in progress, become the first flusher */
2637                        WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
2638
2639                        wq->first_flusher = &this_flusher;
2640
2641                        if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
2642                                                       wq->work_color)) {
2643                                /* nothing to flush, done */
2644                                wq->flush_color = next_color;
2645                                wq->first_flusher = NULL;
2646                                goto out_unlock;
2647                        }
2648                } else {
2649                        /* wait in queue */
2650                        WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
2651                        list_add_tail(&this_flusher.list, &wq->flusher_queue);
2652                        flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
2653                }
2654        } else {
2655                /*
2656                 * Oops, color space is full, wait on overflow queue.
2657                 * The next flush completion will assign us
2658                 * flush_color and transfer to flusher_queue.
2659                 */
2660                list_add_tail(&this_flusher.list, &wq->flusher_overflow);
2661        }
2662
2663        check_flush_dependency(wq, NULL);
2664
2665        mutex_unlock(&wq->mutex);
2666
2667        wait_for_completion(&this_flusher.done);
2668
2669        /*
2670         * Wake-up-and-cascade phase
2671         *
2672         * First flushers are responsible for cascading flushes and
2673         * handling overflow.  Non-first flushers can simply return.
2674         */
2675        if (wq->first_flusher != &this_flusher)
2676                return;
2677
2678        mutex_lock(&wq->mutex);
2679
2680        /* we might have raced, check again with mutex held */
2681        if (wq->first_flusher != &this_flusher)
2682                goto out_unlock;
2683
2684        wq->first_flusher = NULL;
2685
2686        WARN_ON_ONCE(!list_empty(&this_flusher.list));
2687        WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
2688
2689        while (true) {
2690                struct wq_flusher *next, *tmp;
2691
2692                /* complete all the flushers sharing the current flush color */
2693                list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
2694                        if (next->flush_color != wq->flush_color)
2695                                break;
2696                        list_del_init(&next->list);
2697                        complete(&next->done);
2698                }
2699
2700                WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
2701                             wq->flush_color != work_next_color(wq->work_color));
2702
2703                /* this flush_color is finished, advance by one */
2704                wq->flush_color = work_next_color(wq->flush_color);
2705
2706                /* one color has been freed, handle overflow queue */
2707                if (!list_empty(&wq->flusher_overflow)) {
2708                        /*
2709                         * Assign the same color to all overflowed
2710                         * flushers, advance work_color and append to
2711                         * flusher_queue.  This is the start-to-wait
2712                         * phase for these overflowed flushers.
2713                         */
2714                        list_for_each_entry(tmp, &wq->flusher_overflow, list)
2715                                tmp->flush_color = wq->work_color;
2716
2717                        wq->work_color = work_next_color(wq->work_color);
2718
2719                        list_splice_tail_init(&wq->flusher_overflow,
2720                                              &wq->flusher_queue);
2721                        flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
2722                }
2723
2724                if (list_empty(&wq->flusher_queue)) {
2725                        WARN_ON_ONCE(wq->flush_color != wq->work_color);
2726                        break;
2727                }
2728
2729                /*
2730                 * Need to flush more colors.  Make the next flusher
2731                 * the new first flusher and arm pwqs.
2732                 */
2733                WARN_ON_ONCE(wq->flush_color == wq->work_color);
2734                WARN_ON_ONCE(wq->flush_color != next->flush_color);
2735
2736                list_del_init(&next->list);
2737                wq->first_flusher = next;
2738
2739                if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
2740                        break;
2741
2742                /*
2743                 * Meh... this color is already done, clear first
2744                 * flusher and repeat cascading.
2745                 */
2746                wq->first_flusher = NULL;
2747        }
2748
2749out_unlock:
2750        mutex_unlock(&wq->mutex);
2751}
2752EXPORT_SYMBOL(flush_workqueue);
2753
2754/**
2755 * drain_workqueue - drain a workqueue
2756 * @wq: workqueue to drain
2757 *
2758 * Wait until the workqueue becomes empty.  While draining is in progress,
2759 * only chain queueing is allowed.  IOW, only currently pending or running
2760 * work items on @wq can queue further work items on it.  @wq is flushed
2761 * repeatedly until it becomes empty.  The number of flushing is determined
2762 * by the depth of chaining and should be relatively short.  Whine if it
2763 * takes too long.
2764 */
2765void drain_workqueue(struct workqueue_struct *wq)
2766{
2767        unsigned int flush_cnt = 0;
2768        struct pool_workqueue *pwq;
2769
2770        /*
2771         * __queue_work() needs to test whether there are drainers, is much
2772         * hotter than drain_workqueue() and already looks at @wq->flags.
2773         * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers.
2774         */
2775        mutex_lock(&wq->mutex);
2776        if (!wq->nr_drainers++)
2777                wq->flags |= __WQ_DRAINING;
2778        mutex_unlock(&wq->mutex);
2779reflush:
2780        flush_workqueue(wq);
2781
2782        mutex_lock(&wq->mutex);
2783
2784        for_each_pwq(pwq, wq) {
2785                bool drained;
2786
2787                spin_lock_irq(&pwq->pool->lock);
2788                drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
2789                spin_unlock_irq(&pwq->pool->lock);
2790
2791                if (drained)
2792                        continue;
2793
2794                if (++flush_cnt == 10 ||
2795                    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
2796                        pr_warn("workqueue %s: drain_workqueue() isn't complete after %u tries\n",
2797                                wq->name, flush_cnt);
2798
2799                mutex_unlock(&wq->mutex);
2800                goto reflush;
2801        }
2802
2803        if (!--wq->nr_drainers)
2804                wq->flags &= ~__WQ_DRAINING;
2805        mutex_unlock(&wq->mutex);
2806}
2807EXPORT_SYMBOL_GPL(drain_workqueue);
2808
2809static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
2810{
2811        struct worker *worker = NULL;
2812        struct worker_pool *pool;
2813        struct pool_workqueue *pwq;
2814
2815        might_sleep();
2816
2817        local_irq_disable();
2818        pool = get_work_pool(work);
2819        if (!pool) {
2820                local_irq_enable();
2821                return false;
2822        }
2823
2824        spin_lock(&pool->lock);
2825        /* see the comment in try_to_grab_pending() with the same code */
2826        pwq = get_work_pwq(work);
2827        if (pwq) {
2828                if (unlikely(pwq->pool != pool))
2829                        goto already_gone;
2830        } else {
2831                worker = find_worker_executing_work(pool, work);
2832                if (!worker)
2833                        goto already_gone;
2834                pwq = worker->current_pwq;
2835        }
2836
2837        check_flush_dependency(pwq->wq, work);
2838
2839        insert_wq_barrier(pwq, barr, work, worker);
2840        spin_unlock_irq(&pool->lock);
2841
2842        /*
2843         * Force a lock recursion deadlock when using flush_work() inside a
2844         * single-threaded or rescuer equipped workqueue.
2845         *
2846         * For single threaded workqueues the deadlock happens when the work
2847         * is after the work issuing the flush_work(). For rescuer equipped
2848         * workqueues the deadlock happens when the rescuer stalls, blocking
2849         * forward progress.
2850         */
2851        if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer) {
2852                lock_map_acquire(&pwq->wq->lockdep_map);
2853                lock_map_release(&pwq->wq->lockdep_map);
2854        }
2855
2856        return true;
2857already_gone:
2858        spin_unlock_irq(&pool->lock);
2859        return false;
2860}
2861
2862/**
2863 * flush_work - wait for a work to finish executing the last queueing instance
2864 * @work: the work to flush
2865 *
2866 * Wait until @work has finished execution.  @work is guaranteed to be idle
2867 * on return if it hasn't been requeued since flush started.
2868 *
2869 * Return:
2870 * %true if flush_work() waited for the work to finish execution,
2871 * %false if it was already idle.
2872 */
2873bool flush_work(struct work_struct *work)
2874{
2875        struct wq_barrier barr;
2876
2877        if (WARN_ON(!wq_online))
2878                return false;
2879
2880        lock_map_acquire(&work->lockdep_map);
2881        lock_map_release(&work->lockdep_map);
2882
2883        if (start_flush_work(work, &barr)) {
2884                wait_for_completion(&barr.done);
2885                destroy_work_on_stack(&barr.work);
2886                return true;
2887        } else {
2888                return false;
2889        }
2890}
2891EXPORT_SYMBOL_GPL(flush_work);
2892
2893struct cwt_wait {
2894        wait_queue_entry_t              wait;
2895        struct work_struct      *work;
2896};
2897
2898static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
2899{
2900        struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
2901
2902        if (cwait->work != key)
2903                return 0;
2904        return autoremove_wake_function(wait, mode, sync, key);
2905}
2906
2907static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
2908{
2909        static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
2910        unsigned long flags;
2911        int ret;
2912
2913        do {
2914                ret = try_to_grab_pending(work, is_dwork, &flags);
2915                /*
2916                 * If someone else is already canceling, wait for it to
2917                 * finish.  flush_work() doesn't work for PREEMPT_NONE
2918                 * because we may get scheduled between @work's completion
2919                 * and the other canceling task resuming and clearing
2920                 * CANCELING - flush_work() will return false immediately
2921                 * as @work is no longer busy, try_to_grab_pending() will
2922                 * return -ENOENT as @work is still being canceled and the
2923                 * other canceling task won't be able to clear CANCELING as
2924                 * we're hogging the CPU.
2925                 *
2926                 * Let's wait for completion using a waitqueue.  As this
2927                 * may lead to the thundering herd problem, use a custom
2928                 * wake function which matches @work along with exclusive
2929                 * wait and wakeup.
2930                 */
2931                if (unlikely(ret == -ENOENT)) {
2932                        struct cwt_wait cwait;
2933
2934                        init_wait(&cwait.wait);
2935                        cwait.wait.func = cwt_wakefn;
2936                        cwait.work = work;
2937
2938                        prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
2939                                                  TASK_UNINTERRUPTIBLE);
2940                        if (work_is_canceling(work))
2941                                schedule();
2942                        finish_wait(&cancel_waitq, &cwait.wait);
2943                }
2944        } while (unlikely(ret < 0));
2945
2946        /* tell other tasks trying to grab @work to back off */
2947        mark_work_canceling(work);
2948        local_irq_restore(flags);
2949
2950        /*
2951         * This allows canceling during early boot.  We know that @work
2952         * isn't executing.
2953         */
2954        if (wq_online)
2955                flush_work(work);
2956
2957        clear_work_data(work);
2958
2959        /*
2960         * Paired with prepare_to_wait() above so that either
2961         * waitqueue_active() is visible here or !work_is_canceling() is
2962         * visible there.
2963         */
2964        smp_mb();
2965        if (waitqueue_active(&cancel_waitq))
2966                __wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
2967
2968        return ret;
2969}
2970
2971/**
2972 * cancel_work_sync - cancel a work and wait for it to finish
2973 * @work: the work to cancel
2974 *
2975 * Cancel @work and wait for its execution to finish.  This function
2976 * can be used even if the work re-queues itself or migrates to
2977 * another workqueue.  On return from this function, @work is
2978 * guaranteed to be not pending or executing on any CPU.
2979 *
2980 * cancel_work_sync(&delayed_work->work) must not be used for
2981 * delayed_work's.  Use cancel_delayed_work_sync() instead.
2982 *
2983 * The caller must ensure that the workqueue on which @work was last
2984 * queued can't be destroyed before this function returns.
2985 *
2986 * Return:
2987 * %true if @work was pending, %false otherwise.
2988 */
2989bool cancel_work_sync(struct work_struct *work)
2990{
2991        return __cancel_work_timer(work, false);
2992}
2993EXPORT_SYMBOL_GPL(cancel_work_sync);
2994
2995/**
2996 * flush_delayed_work - wait for a dwork to finish executing the last queueing
2997 * @dwork: the delayed work to flush
2998 *
2999 * Delayed timer is cancelled and the pending work is queued for
3000 * immediate execution.  Like flush_work(), this function only
3001 * considers the last queueing instance of @dwork.
3002 *
3003 * Return:
3004 * %true if flush_work() waited for the work to finish execution,
3005 * %false if it was already idle.
3006 */
3007bool flush_delayed_work(struct delayed_work *dwork)
3008{
3009        local_irq_disable();
3010        if (del_timer_sync(&dwork->timer))
3011                __queue_work(dwork->cpu, dwork->wq, &dwork->work);
3012        local_irq_enable();
3013        return flush_work(&dwork->work);
3014}
3015EXPORT_SYMBOL(flush_delayed_work);
3016
3017static bool __cancel_work(struct work_struct *work, bool is_dwork)
3018{
3019        unsigned long flags;
3020        int ret;
3021
3022        do {
3023                ret = try_to_grab_pending(work, is_dwork, &flags);
3024        } while (unlikely(ret == -EAGAIN));
3025
3026        if (unlikely(ret < 0))
3027                return false;
3028
3029        set_work_pool_and_clear_pending(work, get_work_pool_id(work));
3030        local_irq_restore(flags);
3031        return ret;
3032}
3033
3034/*
3035 * See cancel_delayed_work()
3036 */
3037bool cancel_work(struct work_struct *work)
3038{
3039        return __cancel_work(work, false);
3040}
3041
3042/**
3043 * cancel_delayed_work - cancel a delayed work
3044 * @dwork: delayed_work to cancel
3045 *
3046 * Kill off a pending delayed_work.
3047 *
3048 * Return: %true if @dwork was pending and canceled; %false if it wasn't
3049 * pending.
3050 *
3051 * Note:
3052 * The work callback function may still be running on return, unless
3053 * it returns %true and the work doesn't re-arm itself.  Explicitly flush or
3054 * use cancel_delayed_work_sync() to wait on it.
3055 *
3056 * This function is safe to call from any context including IRQ handler.
3057 */
3058bool cancel_delayed_work(struct delayed_work *dwork)
3059{
3060        return __cancel_work(&dwork->work, true);
3061}
3062EXPORT_SYMBOL(cancel_delayed_work);
3063
3064/**
3065 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
3066 * @dwork: the delayed work cancel
3067 *
3068 * This is cancel_work_sync() for delayed works.
3069 *
3070 * Return:
3071 * %true if @dwork was pending, %false otherwise.
3072 */
3073bool cancel_delayed_work_sync(struct delayed_work *dwork)
3074{
3075        return __cancel_work_timer(&dwork->work, true);
3076}
3077EXPORT_SYMBOL(cancel_delayed_work_sync);
3078
3079/**
3080 * schedule_on_each_cpu - execute a function synchronously on each online CPU
3081 * @func: the function to call
3082 *
3083 * schedule_on_each_cpu() executes @func on each online CPU using the
3084 * system workqueue and blocks until all CPUs have completed.
3085 * schedule_on_each_cpu() is very slow.
3086 *
3087 * Return:
3088 * 0 on success, -errno on failure.
3089 */
3090int schedule_on_each_cpu(work_func_t func)
3091{
3092        int cpu;
3093        struct work_struct __percpu *works;
3094
3095        works = alloc_percpu(struct work_struct);
3096        if (!works)
3097                return -ENOMEM;
3098
3099        get_online_cpus();
3100
3101        for_each_online_cpu(cpu) {
3102                struct work_struct *work = per_cpu_ptr(works, cpu);
3103
3104                INIT_WORK(work, func);
3105                schedule_work_on(cpu, work);
3106        }
3107
3108        for_each_online_cpu(cpu)
3109                flush_work(per_cpu_ptr(works, cpu));
3110
3111        put_online_cpus();
3112        free_percpu(works);
3113        return 0;
3114}
3115
3116/**
3117 * execute_in_process_context - reliably execute the routine with user context
3118 * @fn:         the function to execute
3119 * @ew:         guaranteed storage for the execute work structure (must
3120 *              be available when the work executes)
3121 *
3122 * Executes the function immediately if process context is available,
3123 * otherwise schedules the function for delayed execution.
3124 *
3125 * Return:      0 - function was executed
3126 *              1 - function was scheduled for execution
3127 */
3128int execute_in_process_context(work_func_t fn, struct execute_work *ew)
3129{
3130        if (!in_interrupt()) {
3131                fn(&ew->work);
3132                return 0;
3133        }
3134
3135        INIT_WORK(&ew->work, fn);
3136        schedule_work(&ew->work);
3137
3138        return 1;
3139}
3140EXPORT_SYMBOL_GPL(execute_in_process_context);
3141
3142/**
3143 * free_workqueue_attrs - free a workqueue_attrs
3144 * @attrs: workqueue_attrs to free
3145 *
3146 * Undo alloc_workqueue_attrs().
3147 */
3148void free_workqueue_attrs(struct workqueue_attrs *attrs)
3149{
3150        if (attrs) {
3151                free_cpumask_var(attrs->cpumask);
3152                kfree(attrs);
3153        }
3154}
3155
3156/**
3157 * alloc_workqueue_attrs - allocate a workqueue_attrs
3158 * @gfp_mask: allocation mask to use
3159 *
3160 * Allocate a new workqueue_attrs, initialize with default settings and
3161 * return it.
3162 *
3163 * Return: The allocated new workqueue_attr on success. %NULL on failure.
3164 */
3165struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask)
3166{
3167        struct workqueue_attrs *attrs;
3168
3169        attrs = kzalloc(sizeof(*attrs), gfp_mask);
3170        if (!attrs)
3171                goto fail;
3172        if (!alloc_cpumask_var(&attrs->cpumask, gfp_mask))
3173                goto fail;
3174
3175        cpumask_copy(attrs->cpumask, cpu_possible_mask);
3176        return attrs;
3177fail:
3178        free_workqueue_attrs(attrs);
3179        return NULL;
3180}
3181
3182static void copy_workqueue_attrs(struct workqueue_attrs *to,
3183                                 const struct workqueue_attrs *from)
3184{
3185        to->nice = from->nice;
3186        cpumask_copy(to->cpumask, from->cpumask);
3187        /*
3188         * Unlike hash and equality test, this function doesn't ignore
3189         * ->no_numa as it is used for both pool and wq attrs.  Instead,
3190         * get_unbound_pool() explicitly clears ->no_numa after copying.
3191         */
3192        to->no_numa = from->no_numa;
3193}
3194
3195/* hash value of the content of @attr */
3196static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
3197{
3198        u32 hash = 0;
3199
3200        hash = jhash_1word(attrs->nice, hash);
3201        hash = jhash(cpumask_bits(attrs->cpumask),
3202                     BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
3203        return hash;
3204}
3205
3206/* content equality test */
3207static bool wqattrs_equal(const struct workqueue_attrs *a,
3208                          const struct workqueue_attrs *b)
3209{
3210        if (a->nice != b->nice)
3211                return false;
3212        if (!cpumask_equal(a->cpumask, b->cpumask))
3213                return false;
3214        return true;
3215}
3216
3217/**
3218 * init_worker_pool - initialize a newly zalloc'd worker_pool
3219 * @pool: worker_pool to initialize
3220 *
3221 * Initialize a newly zalloc'd @pool.  It also allocates @pool->attrs.
3222 *
3223 * Return: 0 on success, -errno on failure.  Even on failure, all fields
3224 * inside @pool proper are initialized and put_unbound_pool() can be called
3225 * on @pool safely to release it.
3226 */
3227static int init_worker_pool(struct worker_pool *pool)
3228{
3229        spin_lock_init(&pool->lock);
3230        pool->id = -1;
3231        pool->cpu = -1;
3232        pool->node = NUMA_NO_NODE;
3233        pool->flags |= POOL_DISASSOCIATED;
3234        pool->watchdog_ts = jiffies;
3235        INIT_LIST_HEAD(&pool->worklist);
3236        INIT_LIST_HEAD(&pool->idle_list);
3237        hash_init(pool->busy_hash);
3238
3239        setup_deferrable_timer(&pool->idle_timer, idle_worker_timeout,
3240                               (unsigned long)pool);
3241
3242        setup_timer(&pool->mayday_timer, pool_mayday_timeout,
3243                    (unsigned long)pool);
3244
3245        mutex_init(&pool->attach_mutex);
3246        INIT_LIST_HEAD(&pool->workers);
3247
3248        ida_init(&pool->worker_ida);
3249        INIT_HLIST_NODE(&pool->hash_node);
3250        pool->refcnt = 1;
3251
3252        /* shouldn't fail above this point */
3253        pool->attrs = alloc_workqueue_attrs(GFP_KERNEL);
3254        if (!pool->attrs)
3255                return -ENOMEM;
3256        return 0;
3257}
3258
3259static void rcu_free_wq(struct rcu_head *rcu)
3260{
3261        struct workqueue_struct *wq =
3262                container_of(rcu, struct workqueue_struct, rcu);
3263
3264        if (!(wq->flags & WQ_UNBOUND))
3265                free_percpu(wq->cpu_pwqs);
3266        else
3267                free_workqueue_attrs(wq->unbound_attrs);
3268
3269        kfree(wq->rescuer);
3270        kfree(wq);
3271}
3272
3273static void rcu_free_pool(struct rcu_head *rcu)
3274{
3275        struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
3276
3277        ida_destroy(&pool->worker_ida);
3278        free_workqueue_attrs(pool->attrs);
3279        kfree(pool);
3280}
3281
3282/**
3283 * put_unbound_pool - put a worker_pool
3284 * @pool: worker_pool to put
3285 *
3286 * Put @pool.  If its refcnt reaches zero, it gets destroyed in sched-RCU
3287 * safe manner.  get_unbound_pool() calls this function on its failure path
3288 * and this function should be able to release pools which went through,
3289 * successfully or not, init_worker_pool().
3290 *
3291 * Should be called with wq_pool_mutex held.
3292 */
3293static void put_unbound_pool(struct worker_pool *pool)
3294{
3295        DECLARE_COMPLETION_ONSTACK(detach_completion);
3296        struct worker *worker;
3297
3298        lockdep_assert_held(&wq_pool_mutex);
3299
3300        if (--pool->refcnt)
3301                return;
3302
3303        /* sanity checks */
3304        if (WARN_ON(!(pool->cpu < 0)) ||
3305            WARN_ON(!list_empty(&pool->worklist)))
3306                return;
3307
3308        /* release id and unhash */
3309        if (pool->id >= 0)
3310                idr_remove(&worker_pool_idr, pool->id);
3311        hash_del(&pool->hash_node);
3312
3313        /*
3314         * Become the manager and destroy all workers.  This prevents
3315         * @pool's workers from blocking on attach_mutex.  We're the last
3316         * manager and @pool gets freed with the flag set.
3317         */
3318        spin_lock_irq(&pool->lock);
3319        wait_event_lock_irq(wq_manager_wait,
3320                            !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
3321        pool->flags |= POOL_MANAGER_ACTIVE;
3322
3323        while ((worker = first_idle_worker(pool)))
3324                destroy_worker(worker);
3325        WARN_ON(pool->nr_workers || pool->nr_idle);
3326        spin_unlock_irq(&pool->lock);
3327
3328        mutex_lock(&pool->attach_mutex);
3329        if (!list_empty(&pool->workers))
3330                pool->detach_completion = &detach_completion;
3331        mutex_unlock(&pool->attach_mutex);
3332
3333        if (pool->detach_completion)
3334                wait_for_completion(pool->detach_completion);
3335
3336        /* shut down the timers */
3337        del_timer_sync(&pool->idle_timer);
3338        del_timer_sync(&pool->mayday_timer);
3339
3340        /* sched-RCU protected to allow dereferences from get_work_pool() */
3341        call_rcu_sched(&pool->rcu, rcu_free_pool);
3342}
3343
3344/**
3345 * get_unbound_pool - get a worker_pool with the specified attributes
3346 * @attrs: the attributes of the worker_pool to get
3347 *
3348 * Obtain a worker_pool which has the same attributes as @attrs, bump the
3349 * reference count and return it.  If there already is a matching
3350 * worker_pool, it will be used; otherwise, this function attempts to
3351 * create a new one.
3352 *
3353 * Should be called with wq_pool_mutex held.
3354 *
3355 * Return: On success, a worker_pool with the same attributes as @attrs.
3356 * On failure, %NULL.
3357 */
3358static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
3359{
3360        u32 hash = wqattrs_hash(attrs);
3361        struct worker_pool *pool;
3362        int node;
3363        int target_node = NUMA_NO_NODE;
3364
3365        lockdep_assert_held(&wq_pool_mutex);
3366
3367        /* do we already have a matching pool? */
3368        hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
3369                if (wqattrs_equal(pool->attrs, attrs)) {
3370                        pool->refcnt++;
3371                        return pool;
3372                }
3373        }
3374
3375        /* if cpumask is contained inside a NUMA node, we belong to that node */
3376        if (wq_numa_enabled) {
3377                for_each_node(node) {
3378                        if (cpumask_subset(attrs->cpumask,
3379                                           wq_numa_possible_cpumask[node])) {
3380                                target_node = node;
3381                                break;
3382                        }
3383                }
3384        }
3385
3386        /* nope, create a new one */
3387        pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, target_node);
3388        if (!pool || init_worker_pool(pool) < 0)
3389                goto fail;
3390
3391        lockdep_set_subclass(&pool->lock, 1);   /* see put_pwq() */
3392        copy_workqueue_attrs(pool->attrs, attrs);
3393        pool->node = target_node;
3394
3395        /*
3396         * no_numa isn't a worker_pool attribute, always clear it.  See
3397         * 'struct workqueue_attrs' comments for detail.
3398         */
3399        pool->attrs->no_numa = false;
3400
3401        if (worker_pool_assign_id(pool) < 0)
3402                goto fail;
3403
3404        /* create and start the initial worker */
3405        if (wq_online && !create_worker(pool))
3406                goto fail;
3407
3408        /* install */
3409        hash_add(unbound_pool_hash, &pool->hash_node, hash);
3410
3411        return pool;
3412fail:
3413        if (pool)
3414                put_unbound_pool(pool);
3415        return NULL;
3416}
3417
3418static void rcu_free_pwq(struct rcu_head *rcu)
3419{
3420        kmem_cache_free(pwq_cache,
3421                        container_of(rcu, struct pool_workqueue, rcu));
3422}
3423
3424/*
3425 * Scheduled on system_wq by put_pwq() when an unbound pwq hits zero refcnt
3426 * and needs to be destroyed.
3427 */
3428static void pwq_unbound_release_workfn(struct work_struct *work)
3429{
3430        struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
3431                                                  unbound_release_work);
3432        struct workqueue_struct *wq = pwq->wq;
3433        struct worker_pool *pool = pwq->pool;
3434        bool is_last;
3435
3436        if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
3437                return;
3438
3439        mutex_lock(&wq->mutex);
3440        list_del_rcu(&pwq->pwqs_node);
3441        is_last = list_empty(&wq->pwqs);
3442        mutex_unlock(&wq->mutex);
3443
3444        mutex_lock(&wq_pool_mutex);
3445        put_unbound_pool(pool);
3446        mutex_unlock(&wq_pool_mutex);
3447
3448        call_rcu_sched(&pwq->rcu, rcu_free_pwq);
3449
3450        /*
3451         * If we're the last pwq going away, @wq is already dead and no one
3452         * is gonna access it anymore.  Schedule RCU free.
3453         */
3454        if (is_last)
3455                call_rcu_sched(&wq->rcu, rcu_free_wq);
3456}
3457
3458/**
3459 * pwq_adjust_max_active - update a pwq's max_active to the current setting
3460 * @pwq: target pool_workqueue
3461 *
3462 * If @pwq isn't freezing, set @pwq->max_active to the associated
3463 * workqueue's saved_max_active and activate delayed work items
3464 * accordingly.  If @pwq is freezing, clear @pwq->max_active to zero.
3465 */
3466static void pwq_adjust_max_active(struct pool_workqueue *pwq)
3467{
3468        struct workqueue_struct *wq = pwq->wq;
3469        bool freezable = wq->flags & WQ_FREEZABLE;
3470        unsigned long flags;
3471
3472        /* for @wq->saved_max_active */
3473        lockdep_assert_held(&wq->mutex);
3474
3475        /* fast exit for non-freezable wqs */
3476        if (!freezable && pwq->max_active == wq->saved_max_active)
3477                return;
3478
3479        /* this function can be called during early boot w/ irq disabled */
3480        spin_lock_irqsave(&pwq->pool->lock, flags);
3481
3482        /*
3483         * During [un]freezing, the caller is responsible for ensuring that
3484         * this function is called at least once after @workqueue_freezing
3485         * is updated and visible.
3486         */
3487        if (!freezable || !workqueue_freezing) {
3488                pwq->max_active = wq->saved_max_active;
3489
3490                while (!list_empty(&pwq->delayed_works) &&
3491                       pwq->nr_active < pwq->max_active)
3492                        pwq_activate_first_delayed(pwq);
3493
3494                /*
3495                 * Need to kick a worker after thawed or an unbound wq's
3496                 * max_active is bumped.  It's a slow path.  Do it always.
3497                 */
3498                wake_up_worker(pwq->pool);
3499        } else {
3500                pwq->max_active = 0;
3501        }
3502
3503        spin_unlock_irqrestore(&pwq->pool->lock, flags);
3504}
3505
3506/* initialize newly alloced @pwq which is associated with @wq and @pool */
3507static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
3508                     struct worker_pool *pool)
3509{
3510        BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
3511
3512        memset(pwq, 0, sizeof(*pwq));
3513
3514        pwq->pool = pool;
3515        pwq->wq = wq;
3516        pwq->flush_color = -1;
3517        pwq->refcnt = 1;
3518        INIT_LIST_HEAD(&pwq->delayed_works);
3519        INIT_LIST_HEAD(&pwq->pwqs_node);
3520        INIT_LIST_HEAD(&pwq->mayday_node);
3521        INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
3522}
3523
3524/* sync @pwq with the current state of its associated wq and link it */
3525static void link_pwq(struct pool_workqueue *pwq)
3526{
3527        struct workqueue_struct *wq = pwq->wq;
3528
3529        lockdep_assert_held(&wq->mutex);
3530
3531        /* may be called multiple times, ignore if already linked */
3532        if (!list_empty(&pwq->pwqs_node))
3533                return;
3534
3535        /* set the matching work_color */
3536        pwq->work_color = wq->work_color;
3537
3538        /* sync max_active to the current setting */
3539        pwq_adjust_max_active(pwq);
3540
3541        /* link in @pwq */
3542        list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
3543}
3544
3545/* obtain a pool matching @attr and create a pwq associating the pool and @wq */
3546static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
3547                                        const struct workqueue_attrs *attrs)
3548{
3549        struct worker_pool *pool;
3550        struct pool_workqueue *pwq;
3551
3552        lockdep_assert_held(&wq_pool_mutex);
3553
3554        pool = get_unbound_pool(attrs);
3555        if (!pool)
3556                return NULL;
3557
3558        pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
3559        if (!pwq) {
3560                put_unbound_pool(pool);
3561                return NULL;
3562        }
3563
3564        init_pwq(pwq, wq, pool);
3565        return pwq;
3566}
3567
3568/**
3569 * wq_calc_node_cpumask - calculate a wq_attrs' cpumask for the specified node
3570 * @attrs: the wq_attrs of the default pwq of the target workqueue
3571 * @node: the target NUMA node
3572 * @cpu_going_down: if >= 0, the CPU to consider as offline
3573 * @cpumask: outarg, the resulting cpumask
3574 *
3575 * Calculate the cpumask a workqueue with @attrs should use on @node.  If
3576 * @cpu_going_down is >= 0, that cpu is considered offline during
3577 * calculation.  The result is stored in @cpumask.
3578 *
3579 * If NUMA affinity is not enabled, @attrs->cpumask is always used.  If
3580 * enabled and @node has online CPUs requested by @attrs, the returned
3581 * cpumask is the intersection of the possible CPUs of @node and
3582 * @attrs->cpumask.
3583 *
3584 * The caller is responsible for ensuring that the cpumask of @node stays
3585 * stable.
3586 *
3587 * Return: %true if the resulting @cpumask is different from @attrs->cpumask,
3588 * %false if equal.
3589 */
3590static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
3591                                 int cpu_going_down, cpumask_t *cpumask)
3592{
3593        if (!wq_numa_enabled || attrs->no_numa)
3594                goto use_dfl;
3595
3596        /* does @node have any online CPUs @attrs wants? */
3597        cpumask_and(cpumask, cpumask_of_node(node), attrs->cpumask);
3598        if (cpu_going_down >= 0)
3599                cpumask_clear_cpu(cpu_going_down, cpumask);
3600
3601        if (cpumask_empty(cpumask))
3602                goto use_dfl;
3603
3604        /* yeap, return possible CPUs in @node that @attrs wants */
3605        cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]);
3606
3607        if (cpumask_empty(cpumask)) {
3608                pr_warn_once("WARNING: workqueue cpumask: online intersect > "
3609                                "possible intersect\n");
3610                return false;
3611        }
3612
3613        return !cpumask_equal(cpumask, attrs->cpumask);
3614
3615use_dfl:
3616        cpumask_copy(cpumask, attrs->cpumask);
3617        return false;
3618}
3619
3620/* install @pwq into @wq's numa_pwq_tbl[] for @node and return the old pwq */
3621static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
3622                                                   int node,
3623                                                   struct pool_workqueue *pwq)
3624{
3625        struct pool_workqueue *old_pwq;
3626
3627        lockdep_assert_held(&wq_pool_mutex);
3628        lockdep_assert_held(&wq->mutex);
3629
3630        /* link_pwq() can handle duplicate calls */
3631        link_pwq(pwq);
3632
3633        old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
3634        rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
3635        return old_pwq;
3636}
3637
3638/* context to store the prepared attrs & pwqs before applying */
3639struct apply_wqattrs_ctx {
3640        struct workqueue_struct *wq;            /* target workqueue */
3641        struct workqueue_attrs  *attrs;         /* attrs to apply */
3642        struct list_head        list;           /* queued for batching commit */
3643        struct pool_workqueue   *dfl_pwq;
3644        struct pool_workqueue   *pwq_tbl[];
3645};
3646
3647/* free the resources after success or abort */
3648static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx)
3649{
3650        if (ctx) {
3651                int node;
3652
3653                for_each_node(node)
3654                        put_pwq_unlocked(ctx->pwq_tbl[node]);
3655                put_pwq_unlocked(ctx->dfl_pwq);
3656
3657                free_workqueue_attrs(ctx->attrs);
3658
3659                kfree(ctx);
3660        }
3661}
3662
3663/* allocate the attrs and pwqs for later installation */
3664static struct apply_wqattrs_ctx *
3665apply_wqattrs_prepare(struct workqueue_struct *wq,
3666                      const struct workqueue_attrs *attrs)
3667{
3668        struct apply_wqattrs_ctx *ctx;
3669        struct workqueue_attrs *new_attrs, *tmp_attrs;
3670        int node;
3671
3672        lockdep_assert_held(&wq_pool_mutex);
3673
3674        ctx = kzalloc(sizeof(*ctx) + nr_node_ids * sizeof(ctx->pwq_tbl[0]),
3675                      GFP_KERNEL);
3676
3677        new_attrs = alloc_workqueue_attrs(GFP_KERNEL);
3678        tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL);
3679        if (!ctx || !new_attrs || !tmp_attrs)
3680                goto out_free;
3681
3682        /*
3683         * Calculate the attrs of the default pwq.
3684         * If the user configured cpumask doesn't overlap with the
3685         * wq_unbound_cpumask, we fallback to the wq_unbound_cpumask.
3686         */
3687        copy_workqueue_attrs(new_attrs, attrs);
3688        cpumask_and(new_attrs->cpumask, new_attrs->cpumask, wq_unbound_cpumask);
3689        if (unlikely(cpumask_empty(new_attrs->cpumask)))
3690                cpumask_copy(new_attrs->cpumask, wq_unbound_cpumask);
3691
3692        /*
3693         * We may create multiple pwqs with differing cpumasks.  Make a
3694         * copy of @new_attrs which will be modified and used to obtain
3695         * pools.
3696         */
3697        copy_workqueue_attrs(tmp_attrs, new_attrs);
3698
3699        /*
3700         * If something goes wrong during CPU up/down, we'll fall back to
3701         * the default pwq covering whole @attrs->cpumask.  Always create
3702         * it even if we don't use it immediately.
3703         */
3704        ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
3705        if (!ctx->dfl_pwq)
3706                goto out_free;
3707
3708        for_each_node(node) {
3709                if (wq_calc_node_cpumask(new_attrs, node, -1, tmp_attrs->cpumask)) {
3710                        ctx->pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs);
3711                        if (!ctx->pwq_tbl[node])
3712                                goto out_free;
3713                } else {
3714                        ctx->dfl_pwq->refcnt++;
3715                        ctx->pwq_tbl[node] = ctx->dfl_pwq;
3716                }
3717        }
3718
3719        /* save the user configured attrs and sanitize it. */
3720        copy_workqueue_attrs(new_attrs, attrs);
3721        cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
3722        ctx->attrs = new_attrs;
3723
3724        ctx->wq = wq;
3725        free_workqueue_attrs(tmp_attrs);
3726        return ctx;
3727
3728out_free:
3729        free_workqueue_attrs(tmp_attrs);
3730        free_workqueue_attrs(new_attrs);
3731        apply_wqattrs_cleanup(ctx);
3732        return NULL;
3733}
3734
3735/* set attrs and install prepared pwqs, @ctx points to old pwqs on return */
3736static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
3737{
3738        int node;
3739
3740        /* all pwqs have been created successfully, let's install'em */
3741        mutex_lock(&ctx->wq->mutex);
3742
3743        copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);
3744
3745        /* save the previous pwq and install the new one */
3746        for_each_node(node)
3747                ctx->pwq_tbl[node] = numa_pwq_tbl_install(ctx->wq, node,
3748                                                          ctx->pwq_tbl[node]);
3749
3750        /* @dfl_pwq might not have been used, ensure it's linked */
3751        link_pwq(ctx->dfl_pwq);
3752        swap(ctx->wq->dfl_pwq, ctx->dfl_pwq);
3753
3754        mutex_unlock(&ctx->wq->mutex);
3755}
3756
3757static void apply_wqattrs_lock(void)
3758{
3759        /* CPUs should stay stable across pwq creations and installations */
3760        get_online_cpus();
3761        mutex_lock(&wq_pool_mutex);
3762}
3763
3764static void apply_wqattrs_unlock(void)
3765{
3766        mutex_unlock(&wq_pool_mutex);
3767        put_online_cpus();
3768}
3769
3770static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
3771                                        const struct workqueue_attrs *attrs)
3772{
3773        struct apply_wqattrs_ctx *ctx;
3774
3775        /* only unbound workqueues can change attributes */
3776        if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
3777                return -EINVAL;
3778
3779        /* creating multiple pwqs breaks ordering guarantee */
3780        if (!list_empty(&wq->pwqs)) {
3781                if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
3782                        return -EINVAL;
3783
3784                wq->flags &= ~__WQ_ORDERED;
3785        }
3786
3787        ctx = apply_wqattrs_prepare(wq, attrs);
3788        if (!ctx)
3789                return -ENOMEM;
3790
3791        /* the ctx has been prepared successfully, let's commit it */
3792        apply_wqattrs_commit(ctx);
3793        apply_wqattrs_cleanup(ctx);
3794
3795        return 0;
3796}
3797
3798/**
3799 * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
3800 * @wq: the target workqueue
3801 * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
3802 *
3803 * Apply @attrs to an unbound workqueue @wq.  Unless disabled, on NUMA
3804 * machines, this function maps a separate pwq to each NUMA node with
3805 * possibles CPUs in @attrs->cpumask so that work items are affine to the
3806 * NUMA node it was issued on.  Older pwqs are released as in-flight work
3807 * items finish.  Note that a work item which repeatedly requeues itself
3808 * back-to-back will stay on its current pwq.
3809 *
3810 * Performs GFP_KERNEL allocations.
3811 *
3812 * Return: 0 on success and -errno on failure.
3813 */
3814int apply_workqueue_attrs(struct workqueue_struct *wq,
3815                          const struct workqueue_attrs *attrs)
3816{
3817        int ret;
3818
3819        apply_wqattrs_lock();
3820        ret = apply_workqueue_attrs_locked(wq, attrs);
3821        apply_wqattrs_unlock();
3822
3823        return ret;
3824}
3825
3826/**
3827 * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
3828 * @wq: the target workqueue
3829 * @cpu: the CPU coming up or going down
3830 * @online: whether @cpu is coming up or going down
3831 *
3832 * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
3833 * %CPU_DOWN_FAILED.  @cpu is being hot[un]plugged, update NUMA affinity of
3834 * @wq accordingly.
3835 *
3836 * If NUMA affinity can't be adjusted due to memory allocation failure, it
3837 * falls back to @wq->dfl_pwq which may not be optimal but is always
3838 * correct.
3839 *
3840 * Note that when the last allowed CPU of a NUMA node goes offline for a
3841 * workqueue with a cpumask spanning multiple nodes, the workers which were
3842 * already executing the work items for the workqueue will lose their CPU
3843 * affinity and may execute on any CPU.  This is similar to how per-cpu
3844 * workqueues behave on CPU_DOWN.  If a workqueue user wants strict
3845 * affinity, it's the user's responsibility to flush the work item from
3846 * CPU_DOWN_PREPARE.
3847 */
3848static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
3849                                   bool online)
3850{
3851        int node = cpu_to_node(cpu);
3852        int cpu_off = online ? -1 : cpu;
3853        struct pool_workqueue *old_pwq = NULL, *pwq;
3854        struct workqueue_attrs *target_attrs;
3855        cpumask_t *cpumask;
3856
3857        lockdep_assert_held(&wq_pool_mutex);
3858
3859        if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND) ||
3860            wq->unbound_attrs->no_numa)
3861                return;
3862
3863        /*
3864         * We don't wanna alloc/free wq_attrs for each wq for each CPU.
3865         * Let's use a preallocated one.  The following buf is protected by
3866         * CPU hotplug exclusion.
3867         */
3868        target_attrs = wq_update_unbound_numa_attrs_buf;
3869        cpumask = target_attrs->cpumask;
3870
3871        copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
3872        pwq = unbound_pwq_by_node(wq, node);
3873
3874        /*
3875         * Let's determine what needs to be done.  If the target cpumask is
3876         * different from the default pwq's, we need to compare it to @pwq's
3877         * and create a new one if they don't match.  If the target cpumask
3878         * equals the default pwq's, the default pwq should be used.
3879         */
3880        if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) {
3881                if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask))
3882                        return;
3883        } else {
3884                goto use_dfl_pwq;
3885        }
3886
3887        /* create a new pwq */
3888        pwq = alloc_unbound_pwq(wq, target_attrs);
3889        if (!pwq) {
3890                pr_warn("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n",
3891                        wq->name);
3892                goto use_dfl_pwq;
3893        }
3894
3895        /* Install the new pwq. */
3896        mutex_lock(&wq->mutex);
3897        old_pwq = numa_pwq_tbl_install(wq, node, pwq);
3898        goto out_unlock;
3899
3900use_dfl_pwq:
3901        mutex_lock(&wq->mutex);
3902        spin_lock_irq(&wq->dfl_pwq->pool->lock);
3903        get_pwq(wq->dfl_pwq);
3904        spin_unlock_irq(&wq->dfl_pwq->pool->lock);
3905        old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
3906out_unlock:
3907        mutex_unlock(&wq->mutex);
3908        put_pwq_unlocked(old_pwq);
3909}
3910
3911static int alloc_and_link_pwqs(struct workqueue_struct *wq)
3912{
3913        bool highpri = wq->flags & WQ_HIGHPRI;
3914        int cpu, ret;
3915
3916        if (!(wq->flags & WQ_UNBOUND)) {
3917                wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
3918                if (!wq->cpu_pwqs)
3919                        return -ENOMEM;
3920
3921                for_each_possible_cpu(cpu) {
3922                        struct pool_workqueue *pwq =
3923                                per_cpu_ptr(wq->cpu_pwqs, cpu);
3924                        struct worker_pool *cpu_pools =
3925                                per_cpu(cpu_worker_pools, cpu);
3926
3927                        init_pwq(pwq, wq, &cpu_pools[highpri]);
3928
3929                        mutex_lock(&wq->mutex);
3930                        link_pwq(pwq);
3931                        mutex_unlock(&wq->mutex);
3932                }
3933                return 0;
3934        } else if (wq->flags & __WQ_ORDERED) {
3935                ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
3936                /* there should only be single pwq for ordering guarantee */
3937                WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
3938                              wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
3939                     "ordering guarantee broken for workqueue %s\n", wq->name);
3940                return ret;
3941        } else {
3942                return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
3943        }
3944}
3945
3946static int wq_clamp_max_active(int max_active, unsigned int flags,
3947                               const char *name)
3948{
3949        int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
3950
3951        if (max_active < 1 || max_active > lim)
3952                pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
3953                        max_active, name, 1, lim);
3954
3955        return clamp_val(max_active, 1, lim);
3956}
3957
3958struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
3959                                               unsigned int flags,
3960                                               int max_active,
3961                                               struct lock_class_key *key,
3962                                               const char *lock_name, ...)
3963{
3964        size_t tbl_size = 0;
3965        va_list args;
3966        struct workqueue_struct *wq;
3967        struct pool_workqueue *pwq;
3968
3969        /*
3970         * Unbound && max_active == 1 used to imply ordered, which is no
3971         * longer the case on NUMA machines due to per-node pools.  While
3972         * alloc_ordered_workqueue() is the right way to create an ordered
3973         * workqueue, keep the previous behavior to avoid subtle breakages
3974         * on NUMA.
3975         */
3976        if ((flags & WQ_UNBOUND) && max_active == 1)
3977                flags |= __WQ_ORDERED;
3978
3979        /* see the comment above the definition of WQ_POWER_EFFICIENT */
3980        if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
3981                flags |= WQ_UNBOUND;
3982
3983        /* allocate wq and format name */
3984        if (flags & WQ_UNBOUND)
3985                tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]);
3986
3987        wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL);
3988        if (!wq)
3989                return NULL;
3990
3991        if (flags & WQ_UNBOUND) {
3992                wq->unbound_attrs = alloc_workqueue_attrs(GFP_KERNEL);
3993                if (!wq->unbound_attrs)
3994                        goto err_free_wq;
3995        }
3996
3997        va_start(args, lock_name);
3998        vsnprintf(wq->name, sizeof(wq->name), fmt, args);
3999        va_end(args);
4000
4001        max_active = max_active ?: WQ_DFL_ACTIVE;
4002        max_active = wq_clamp_max_active(max_active, flags, wq->name);
4003
4004        /* init wq */
4005        wq->flags = flags;
4006        wq->saved_max_active = max_active;
4007        mutex_init(&wq->mutex);
4008        atomic_set(&wq->nr_pwqs_to_flush, 0);
4009        INIT_LIST_HEAD(&wq->pwqs);
4010        INIT_LIST_HEAD(&wq->flusher_queue);
4011        INIT_LIST_HEAD(&wq->flusher_overflow);
4012        INIT_LIST_HEAD(&wq->maydays);
4013
4014        lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
4015        INIT_LIST_HEAD(&wq->list);
4016
4017        if (alloc_and_link_pwqs(wq) < 0)
4018                goto err_free_wq;
4019
4020        /*
4021         * Workqueues which may be used during memory reclaim should
4022         * have a rescuer to guarantee forward progress.
4023         */
4024        if (flags & WQ_MEM_RECLAIM) {
4025                struct worker *rescuer;
4026
4027                rescuer = alloc_worker(NUMA_NO_NODE);
4028                if (!rescuer)
4029                        goto err_destroy;
4030
4031                rescuer->rescue_wq = wq;
4032                rescuer->task = kthread_create(rescuer_thread, rescuer, "%s",
4033                                               wq->name);
4034                if (IS_ERR(rescuer->task)) {
4035                        kfree(rescuer);
4036                        goto err_destroy;
4037                }
4038
4039                wq->rescuer = rescuer;
4040                kthread_bind_mask(rescuer->task, cpu_possible_mask);
4041                wake_up_process(rescuer->task);
4042        }
4043
4044        if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
4045                goto err_destroy;
4046
4047        /*
4048         * wq_pool_mutex protects global freeze state and workqueues list.
4049         * Grab it, adjust max_active and add the new @wq to workqueues
4050         * list.
4051         */
4052        mutex_lock(&wq_pool_mutex);
4053
4054        mutex_lock(&wq->mutex);
4055        for_each_pwq(pwq, wq)
4056                pwq_adjust_max_active(pwq);
4057        mutex_unlock(&wq->mutex);
4058
4059        list_add_tail_rcu(&wq->list, &workqueues);
4060
4061        mutex_unlock(&wq_pool_mutex);
4062
4063        return wq;
4064
4065err_free_wq:
4066        free_workqueue_attrs(wq->unbound_attrs);
4067        kfree(wq);
4068        return NULL;
4069err_destroy:
4070        destroy_workqueue(wq);
4071        return NULL;
4072}
4073EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
4074
4075/**
4076 * destroy_workqueue - safely terminate a workqueue
4077 * @wq: target workqueue
4078 *
4079 * Safely destroy a workqueue. All work currently pending will be done first.
4080 */
4081void destroy_workqueue(struct workqueue_struct *wq)
4082{
4083        struct pool_workqueue *pwq;
4084        int node;
4085
4086        /* drain it before proceeding with destruction */
4087        drain_workqueue(wq);
4088
4089        /* sanity checks */
4090        mutex_lock(&wq->mutex);
4091        for_each_pwq(pwq, wq) {
4092                int i;
4093
4094                for (i = 0; i < WORK_NR_COLORS; i++) {
4095                        if (WARN_ON(pwq->nr_in_flight[i])) {
4096                                mutex_unlock(&wq->mutex);
4097                                show_workqueue_state();
4098                                return;
4099                        }
4100                }
4101
4102                if (WARN_ON((pwq != wq->dfl_pwq) && (pwq->refcnt > 1)) ||
4103                    WARN_ON(pwq->nr_active) ||
4104                    WARN_ON(!list_empty(&pwq->delayed_works))) {
4105                        mutex_unlock(&wq->mutex);
4106                        show_workqueue_state();
4107                        return;
4108                }
4109        }
4110        mutex_unlock(&wq->mutex);
4111
4112        /*
4113         * wq list is used to freeze wq, remove from list after
4114         * flushing is complete in case freeze races us.
4115         */
4116        mutex_lock(&wq_pool_mutex);
4117        list_del_rcu(&wq->list);
4118        mutex_unlock(&wq_pool_mutex);
4119
4120        workqueue_sysfs_unregister(wq);
4121
4122        if (wq->rescuer)
4123                kthread_stop(wq->rescuer->task);
4124
4125        if (!(wq->flags & WQ_UNBOUND)) {
4126                /*
4127                 * The base ref is never dropped on per-cpu pwqs.  Directly
4128                 * schedule RCU free.
4129                 */
4130                call_rcu_sched(&wq->rcu, rcu_free_wq);
4131        } else {
4132                /*
4133                 * We're the sole accessor of @wq at this point.  Directly
4134                 * access numa_pwq_tbl[] and dfl_pwq to put the base refs.
4135                 * @wq will be freed when the last pwq is released.
4136                 */
4137                for_each_node(node) {
4138                        pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
4139                        RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL);
4140                        put_pwq_unlocked(pwq);
4141                }
4142
4143                /*
4144                 * Put dfl_pwq.  @wq may be freed any time after dfl_pwq is
4145                 * put.  Don't access it afterwards.
4146                 */
4147                pwq = wq->dfl_pwq;
4148                wq->dfl_pwq = NULL;
4149                put_pwq_unlocked(pwq);
4150        }
4151}
4152EXPORT_SYMBOL_GPL(destroy_workqueue);
4153
4154/**
4155 * workqueue_set_max_active - adjust max_active of a workqueue
4156 * @wq: target workqueue
4157 * @max_active: new max_active value.
4158 *
4159 * Set max_active of @wq to @max_active.
4160 *
4161 * CONTEXT:
4162 * Don't call from IRQ context.
4163 */
4164void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
4165{
4166        struct pool_workqueue *pwq;
4167
4168        /* disallow meddling with max_active for ordered workqueues */
4169        if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
4170                return;
4171
4172        max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
4173
4174        mutex_lock(&wq->mutex);
4175
4176        wq->flags &= ~__WQ_ORDERED;
4177        wq->saved_max_active = max_active;
4178
4179        for_each_pwq(pwq, wq)
4180                pwq_adjust_max_active(pwq);
4181
4182        mutex_unlock(&wq->mutex);
4183}
4184EXPORT_SYMBOL_GPL(workqueue_set_max_active);
4185
4186/**
4187 * current_is_workqueue_rescuer - is %current workqueue rescuer?
4188 *
4189 * Determine whether %current is a workqueue rescuer.  Can be used from
4190 * work functions to determine whether it's being run off the rescuer task.
4191 *
4192 * Return: %true if %current is a workqueue rescuer. %false otherwise.
4193 */
4194bool current_is_workqueue_rescuer(void)
4195{
4196        struct worker *worker = current_wq_worker();
4197
4198        return worker && worker->rescue_wq;
4199}
4200
4201/**
4202 * workqueue_congested - test whether a workqueue is congested
4203 * @cpu: CPU in question
4204 * @wq: target workqueue
4205 *
4206 * Test whether @wq's cpu workqueue for @cpu is congested.  There is
4207 * no synchronization around this function and the test result is
4208 * unreliable and only useful as advisory hints or for debugging.
4209 *
4210 * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
4211 * Note that both per-cpu and unbound workqueues may be associated with
4212 * multiple pool_workqueues which have separate congested states.  A
4213 * workqueue being congested on one CPU doesn't mean the workqueue is also
4214 * contested on other CPUs / NUMA nodes.
4215 *
4216 * Return:
4217 * %true if congested, %false otherwise.
4218 */
4219bool workqueue_congested(int cpu, struct workqueue_struct *wq)
4220{
4221        struct pool_workqueue *pwq;
4222        bool ret;
4223
4224        rcu_read_lock_sched();
4225
4226        if (cpu == WORK_CPU_UNBOUND)
4227                cpu = smp_processor_id();
4228
4229        if (!(wq->flags & WQ_UNBOUND))
4230                pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
4231        else
4232                pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
4233
4234        ret = !list_empty(&pwq->delayed_works);
4235        rcu_read_unlock_sched();
4236
4237        return ret;
4238}
4239EXPORT_SYMBOL_GPL(workqueue_congested);
4240
4241/**
4242 * work_busy - test whether a work is currently pending or running
4243 * @work: the work to be tested
4244 *
4245 * Test whether @work is currently pending or running.  There is no
4246 * synchronization around this function and the test result is
4247 * unreliable and only useful as advisory hints or for debugging.
4248 *
4249 * Return:
4250 * OR'd bitmask of WORK_BUSY_* bits.
4251 */
4252unsigned int work_busy(struct work_struct *work)
4253{
4254        struct worker_pool *pool;
4255        unsigned long flags;
4256        unsigned int ret = 0;
4257
4258        if (work_pending(work))
4259                ret |= WORK_BUSY_PENDING;
4260
4261        local_irq_save(flags);
4262        pool = get_work_pool(work);
4263        if (pool) {
4264                spin_lock(&pool->lock);
4265                if (find_worker_executing_work(pool, work))
4266                        ret |= WORK_BUSY_RUNNING;
4267                spin_unlock(&pool->lock);
4268        }
4269        local_irq_restore(flags);
4270
4271        return ret;
4272}
4273EXPORT_SYMBOL_GPL(work_busy);
4274
4275/**
4276 * set_worker_desc - set description for the current work item
4277 * @fmt: printf-style format string
4278 * @...: arguments for the format string
4279 *
4280 * This function can be called by a running work function to describe what
4281 * the work item is about.  If the worker task gets dumped, this
4282 * information will be printed out together to help debugging.  The
4283 * description can be at most WORKER_DESC_LEN including the trailing '\0'.
4284 */
4285void set_worker_desc(const char *fmt, ...)
4286{
4287        struct worker *worker = current_wq_worker();
4288        va_list args;
4289
4290        if (worker) {
4291                va_start(args, fmt);
4292                vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
4293                va_end(args);
4294                worker->desc_valid = true;
4295        }
4296}
4297
4298/**
4299 * print_worker_info - print out worker information and description
4300 * @log_lvl: the log level to use when printing
4301 * @task: target task
4302 *
4303 * If @task is a worker and currently executing a work item, print out the
4304 * name of the workqueue being serviced and worker description set with
4305 * set_worker_desc() by the currently executing work item.
4306 *
4307 * This function can be safely called on any task as long as the
4308 * task_struct itself is accessible.  While safe, this function isn't
4309 * synchronized and may print out mixups or garbages of limited length.
4310 */
4311void print_worker_info(const char *log_lvl, struct task_struct *task)
4312{
4313        work_func_t *fn = NULL;
4314        char name[WQ_NAME_LEN] = { };
4315        char desc[WORKER_DESC_LEN] = { };
4316        struct pool_workqueue *pwq = NULL;
4317        struct workqueue_struct *wq = NULL;
4318        bool desc_valid = false;
4319        struct worker *worker;
4320
4321        if (!(task->flags & PF_WQ_WORKER))
4322                return;
4323
4324        /*
4325         * This function is called without any synchronization and @task
4326         * could be in any state.  Be careful with dereferences.
4327         */
4328        worker = kthread_probe_data(task);
4329
4330        /*
4331         * Carefully copy the associated workqueue's workfn and name.  Keep
4332         * the original last '\0' in case the original contains garbage.
4333         */
4334        probe_kernel_read(&fn, &worker->current_func, sizeof(fn));
4335        probe_kernel_read(&pwq, &worker->current_pwq, sizeof(pwq));
4336        probe_kernel_read(&wq, &pwq->wq, sizeof(wq));
4337        probe_kernel_read(name, wq->name, sizeof(name) - 1);
4338
4339        /* copy worker description */
4340        probe_kernel_read(&desc_valid, &worker->desc_valid, sizeof(desc_valid));
4341        if (desc_valid)
4342                probe_kernel_read(desc, worker->desc, sizeof(desc) - 1);
4343
4344        if (fn || name[0] || desc[0]) {
4345                printk("%sWorkqueue: %s %pf", log_lvl, name, fn);
4346                if (desc[0])
4347                        pr_cont(" (%s)", desc);
4348                pr_cont("\n");
4349        }
4350}
4351
4352static void pr_cont_pool_info(struct worker_pool *pool)
4353{
4354        pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask);
4355        if (pool->node != NUMA_NO_NODE)
4356                pr_cont(" node=%d", pool->node);
4357        pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice);
4358}
4359
4360static void pr_cont_work(bool comma, struct work_struct *work)
4361{
4362        if (work->func == wq_barrier_func) {
4363                struct wq_barrier *barr;
4364
4365                barr = container_of(work, struct wq_barrier, work);
4366
4367                pr_cont("%s BAR(%d)", comma ? "," : "",
4368                        task_pid_nr(barr->task));
4369        } else {
4370                pr_cont("%s %pf", comma ? "," : "", work->func);
4371        }
4372}
4373
4374static void show_pwq(struct pool_workqueue *pwq)
4375{
4376        struct worker_pool *pool = pwq->pool;
4377        struct work_struct *work;
4378        struct worker *worker;
4379        bool has_in_flight = false, has_pending = false;
4380        int bkt;
4381
4382        pr_info("  pwq %d:", pool->id);
4383        pr_cont_pool_info(pool);
4384
4385        pr_cont(" active=%d/%d%s\n", pwq->nr_active, pwq->max_active,
4386                !list_empty(&pwq->mayday_node) ? " MAYDAY" : "");
4387
4388        hash_for_each(pool->busy_hash, bkt, worker, hentry) {
4389                if (worker->current_pwq == pwq) {
4390                        has_in_flight = true;
4391                        break;
4392                }
4393        }
4394        if (has_in_flight) {
4395                bool comma = false;
4396
4397                pr_info("    in-flight:");
4398                hash_for_each(pool->busy_hash, bkt, worker, hentry) {
4399                        if (worker->current_pwq != pwq)
4400                                continue;
4401
4402                        pr_cont("%s %d%s:%pf", comma ? "," : "",
4403                                task_pid_nr(worker->task),
4404                                worker == pwq->wq->rescuer ? "(RESCUER)" : "",
4405                                worker->current_func);
4406                        list_for_each_entry(work, &worker->scheduled, entry)
4407                                pr_cont_work(false, work);
4408                        comma = true;
4409                }
4410                pr_cont("\n");
4411        }
4412
4413        list_for_each_entry(work, &pool->worklist, entry) {
4414                if (get_work_pwq(work) == pwq) {
4415                        has_pending = true;
4416                        break;
4417                }
4418        }
4419        if (has_pending) {
4420                bool comma = false;
4421
4422                pr_info("    pending:");
4423                list_for_each_entry(work, &pool->worklist, entry) {
4424                        if (get_work_pwq(work) != pwq)
4425                                continue;
4426
4427                        pr_cont_work(comma, work);
4428                        comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
4429                }
4430                pr_cont("\n");
4431        }
4432
4433        if (!list_empty(&pwq->delayed_works)) {
4434                bool comma = false;
4435
4436                pr_info("    delayed:");
4437                list_for_each_entry(work, &pwq->delayed_works, entry) {
4438                        pr_cont_work(comma, work);
4439                        comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
4440                }
4441                pr_cont("\n");
4442        }
4443}
4444
4445/**
4446 * show_workqueue_state - dump workqueue state
4447 *
4448 * Called from a sysrq handler or try_to_freeze_tasks() and prints out
4449 * all busy workqueues and pools.
4450 */
4451void show_workqueue_state(void)
4452{
4453        struct workqueue_struct *wq;
4454        struct worker_pool *pool;
4455        unsigned long flags;
4456        int pi;
4457
4458        rcu_read_lock_sched();
4459
4460        pr_info("Showing busy workqueues and worker pools:\n");
4461
4462        list_for_each_entry_rcu(wq, &workqueues, list) {
4463                struct pool_workqueue *pwq;
4464                bool idle = true;
4465
4466                for_each_pwq(pwq, wq) {
4467                        if (pwq->nr_active || !list_empty(&pwq->delayed_works)) {
4468                                idle = false;
4469                                break;
4470                        }
4471                }
4472                if (idle)
4473                        continue;
4474
4475                pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
4476
4477                for_each_pwq(pwq, wq) {
4478                        spin_lock_irqsave(&pwq->pool->lock, flags);
4479                        if (pwq->nr_active || !list_empty(&pwq->delayed_works))
4480                                show_pwq(pwq);
4481                        spin_unlock_irqrestore(&pwq->pool->lock, flags);
4482                }
4483        }
4484
4485        for_each_pool(pool, pi) {
4486                struct worker *worker;
4487                bool first = true;
4488
4489                spin_lock_irqsave(&pool->lock, flags);
4490                if (pool->nr_workers == pool->nr_idle)
4491                        goto next_pool;
4492
4493                pr_info("pool %d:", pool->id);
4494                pr_cont_pool_info(pool);
4495                pr_cont(" hung=%us workers=%d",
4496                        jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
4497                        pool->nr_workers);
4498                if (pool->manager)
4499                        pr_cont(" manager: %d",
4500                                task_pid_nr(pool->manager->task));
4501                list_for_each_entry(worker, &pool->idle_list, entry) {
4502                        pr_cont(" %s%d", first ? "idle: " : "",
4503                                task_pid_nr(worker->task));
4504                        first = false;
4505                }
4506                pr_cont("\n");
4507        next_pool:
4508                spin_unlock_irqrestore(&pool->lock, flags);
4509        }
4510
4511        rcu_read_unlock_sched();
4512}
4513
4514/*
4515 * CPU hotplug.
4516 *
4517 * There are two challenges in supporting CPU hotplug.  Firstly, there
4518 * are a lot of assumptions on strong associations among work, pwq and
4519 * pool which make migrating pending and scheduled works very
4520 * difficult to implement without impacting hot paths.  Secondly,
4521 * worker pools serve mix of short, long and very long running works making
4522 * blocked draining impractical.
4523 *
4524 * This is solved by allowing the pools to be disassociated from the CPU
4525 * running as an unbound one and allowing it to be reattached later if the
4526 * cpu comes back online.
4527 */
4528
4529static void wq_unbind_fn(struct work_struct *work)
4530{
4531        int cpu = smp_processor_id();
4532        struct worker_pool *pool;
4533        struct worker *worker;
4534
4535        for_each_cpu_worker_pool(pool, cpu) {
4536                mutex_lock(&pool->attach_mutex);
4537                spin_lock_irq(&pool->lock);
4538
4539                /*
4540                 * We've blocked all attach/detach operations. Make all workers
4541                 * unbound and set DISASSOCIATED.  Before this, all workers
4542                 * except for the ones which are still executing works from
4543                 * before the last CPU down must be on the cpu.  After
4544                 * this, they may become diasporas.
4545                 */
4546                for_each_pool_worker(worker, pool)
4547                        worker->flags |= WORKER_UNBOUND;
4548
4549                pool->flags |= POOL_DISASSOCIATED;
4550
4551                spin_unlock_irq(&pool->lock);
4552                mutex_unlock(&pool->attach_mutex);
4553
4554                /*
4555                 * Call schedule() so that we cross rq->lock and thus can
4556                 * guarantee sched callbacks see the %WORKER_UNBOUND flag.
4557                 * This is necessary as scheduler callbacks may be invoked
4558                 * from other cpus.
4559                 */
4560                schedule();
4561
4562                /*
4563                 * Sched callbacks are disabled now.  Zap nr_running.
4564                 * After this, nr_running stays zero and need_more_worker()
4565                 * and keep_working() are always true as long as the
4566                 * worklist is not empty.  This pool now behaves as an
4567                 * unbound (in terms of concurrency management) pool which
4568                 * are served by workers tied to the pool.
4569                 */
4570                atomic_set(&pool->nr_running, 0);
4571
4572                /*
4573                 * With concurrency management just turned off, a busy
4574                 * worker blocking could lead to lengthy stalls.  Kick off
4575                 * unbound chain execution of currently pending work items.
4576                 */
4577                spin_lock_irq(&pool->lock);
4578                wake_up_worker(pool);
4579                spin_unlock_irq(&pool->lock);
4580        }
4581}
4582
4583/**
4584 * rebind_workers - rebind all workers of a pool to the associated CPU
4585 * @pool: pool of interest
4586 *
4587 * @pool->cpu is coming online.  Rebind all workers to the CPU.
4588 */
4589static void rebind_workers(struct worker_pool *pool)
4590{
4591        struct worker *worker;
4592
4593        lockdep_assert_held(&pool->attach_mutex);
4594
4595        /*
4596         * Restore CPU affinity of all workers.  As all idle workers should
4597         * be on the run-queue of the associated CPU before any local
4598         * wake-ups for concurrency management happen, restore CPU affinity
4599         * of all workers first and then clear UNBOUND.  As we're called
4600         * from CPU_ONLINE, the following shouldn't fail.
4601         */
4602        for_each_pool_worker(worker, pool)
4603                WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
4604                                                  pool->attrs->cpumask) < 0);
4605
4606        spin_lock_irq(&pool->lock);
4607
4608        /*
4609         * XXX: CPU hotplug notifiers are weird and can call DOWN_FAILED
4610         * w/o preceding DOWN_PREPARE.  Work around it.  CPU hotplug is
4611         * being reworked and this can go away in time.
4612         */
4613        if (!(pool->flags & POOL_DISASSOCIATED)) {
4614                spin_unlock_irq(&pool->lock);
4615                return;
4616        }
4617
4618        pool->flags &= ~POOL_DISASSOCIATED;
4619
4620        for_each_pool_worker(worker, pool) {
4621                unsigned int worker_flags = worker->flags;
4622
4623                /*
4624                 * A bound idle worker should actually be on the runqueue
4625                 * of the associated CPU for local wake-ups targeting it to
4626                 * work.  Kick all idle workers so that they migrate to the
4627                 * associated CPU.  Doing this in the same loop as
4628                 * replacing UNBOUND with REBOUND is safe as no worker will
4629                 * be bound before @pool->lock is released.
4630                 */
4631                if (worker_flags & WORKER_IDLE)
4632                        wake_up_process(worker->task);
4633
4634                /*
4635                 * We want to clear UNBOUND but can't directly call
4636                 * worker_clr_flags() or adjust nr_running.  Atomically
4637                 * replace UNBOUND with another NOT_RUNNING flag REBOUND.
4638                 * @worker will clear REBOUND using worker_clr_flags() when
4639                 * it initiates the next execution cycle thus restoring
4640                 * concurrency management.  Note that when or whether
4641                 * @worker clears REBOUND doesn't affect correctness.
4642                 *
4643                 * ACCESS_ONCE() is necessary because @worker->flags may be
4644                 * tested without holding any lock in
4645                 * wq_worker_waking_up().  Without it, NOT_RUNNING test may
4646                 * fail incorrectly leading to premature concurrency
4647                 * management operations.
4648                 */
4649                WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
4650                worker_flags |= WORKER_REBOUND;
4651                worker_flags &= ~WORKER_UNBOUND;
4652                ACCESS_ONCE(worker->flags) = worker_flags;
4653        }
4654
4655        spin_unlock_irq(&pool->lock);
4656}
4657
4658/**
4659 * restore_unbound_workers_cpumask - restore cpumask of unbound workers
4660 * @pool: unbound pool of interest
4661 * @cpu: the CPU which is coming up
4662 *
4663 * An unbound pool may end up with a cpumask which doesn't have any online
4664 * CPUs.  When a worker of such pool get scheduled, the scheduler resets
4665 * its cpus_allowed.  If @cpu is in @pool's cpumask which didn't have any
4666 * online CPU before, cpus_allowed of all its workers should be restored.
4667 */
4668static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
4669{
4670        static cpumask_t cpumask;
4671        struct worker *worker;
4672
4673        lockdep_assert_held(&pool->attach_mutex);
4674
4675        /* is @cpu allowed for @pool? */
4676        if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
4677                return;
4678
4679        cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
4680
4681        /* as we're called from CPU_ONLINE, the following shouldn't fail */
4682        for_each_pool_worker(worker, pool)
4683                WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
4684}
4685
4686int workqueue_prepare_cpu(unsigned int cpu)
4687{
4688        struct worker_pool *pool;
4689
4690        for_each_cpu_worker_pool(pool, cpu) {
4691                if (pool->nr_workers)
4692                        continue;
4693                if (!create_worker(pool))
4694                        return -ENOMEM;
4695        }
4696        return 0;
4697}
4698
4699int workqueue_online_cpu(unsigned int cpu)
4700{
4701        struct worker_pool *pool;
4702        struct workqueue_struct *wq;
4703        int pi;
4704
4705        mutex_lock(&wq_pool_mutex);
4706
4707        for_each_pool(pool, pi) {
4708                mutex_lock(&pool->attach_mutex);
4709
4710                if (pool->cpu == cpu)
4711                        rebind_workers(pool);
4712                else if (pool->cpu < 0)
4713                        restore_unbound_workers_cpumask(pool, cpu);
4714
4715                mutex_unlock(&pool->attach_mutex);
4716        }
4717
4718        /* update NUMA affinity of unbound workqueues */
4719        list_for_each_entry(wq, &workqueues, list)
4720                wq_update_unbound_numa(wq, cpu, true);
4721
4722        mutex_unlock(&wq_pool_mutex);
4723        return 0;
4724}
4725
4726int workqueue_offline_cpu(unsigned int cpu)
4727{
4728        struct work_struct unbind_work;
4729        struct workqueue_struct *wq;
4730
4731        /* unbinding per-cpu workers should happen on the local CPU */
4732        INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
4733        queue_work_on(cpu, system_highpri_wq, &unbind_work);
4734
4735        /* update NUMA affinity of unbound workqueues */
4736        mutex_lock(&wq_pool_mutex);
4737        list_for_each_entry(wq, &workqueues, list)
4738                wq_update_unbound_numa(wq, cpu, false);
4739        mutex_unlock(&wq_pool_mutex);
4740
4741        /* wait for per-cpu unbinding to finish */
4742        flush_work(&unbind_work);
4743        destroy_work_on_stack(&unbind_work);
4744        return 0;
4745}
4746
4747#ifdef CONFIG_SMP
4748
4749struct work_for_cpu {
4750        struct work_struct work;
4751        long (*fn)(void *);
4752        void *arg;
4753        long ret;
4754};
4755
4756static void work_for_cpu_fn(struct work_struct *work)
4757{
4758        struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
4759
4760        wfc->ret = wfc->fn(wfc->arg);
4761}
4762
4763/**
4764 * work_on_cpu - run a function in thread context on a particular cpu
4765 * @cpu: the cpu to run on
4766 * @fn: the function to run
4767 * @arg: the function arg
4768 *
4769 * It is up to the caller to ensure that the cpu doesn't go offline.
4770 * The caller must not hold any locks which would prevent @fn from completing.
4771 *
4772 * Return: The value @fn returns.
4773 */
4774long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
4775{
4776        struct work_for_cpu wfc = { .fn = fn, .arg = arg };
4777
4778        INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
4779        schedule_work_on(cpu, &wfc.work);
4780        flush_work(&wfc.work);
4781        destroy_work_on_stack(&wfc.work);
4782        return wfc.ret;
4783}
4784EXPORT_SYMBOL_GPL(work_on_cpu);
4785
4786/**
4787 * work_on_cpu_safe - run a function in thread context on a particular cpu
4788 * @cpu: the cpu to run on
4789 * @fn:  the function to run
4790 * @arg: the function argument
4791 *
4792 * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold
4793 * any locks which would prevent @fn from completing.
4794 *
4795 * Return: The value @fn returns.
4796 */
4797long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
4798{
4799        long ret = -ENODEV;
4800
4801        get_online_cpus();
4802        if (cpu_online(cpu))
4803                ret = work_on_cpu(cpu, fn, arg);
4804        put_online_cpus();
4805        return ret;
4806}
4807EXPORT_SYMBOL_GPL(work_on_cpu_safe);
4808#endif /* CONFIG_SMP */
4809
4810#ifdef CONFIG_FREEZER
4811
4812/**
4813 * freeze_workqueues_begin - begin freezing workqueues
4814 *
4815 * Start freezing workqueues.  After this function returns, all freezable
4816 * workqueues will queue new works to their delayed_works list instead of
4817 * pool->worklist.
4818 *
4819 * CONTEXT:
4820 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
4821 */
4822void freeze_workqueues_begin(void)
4823{
4824        struct workqueue_struct *wq;
4825        struct pool_workqueue *pwq;
4826
4827        mutex_lock(&wq_pool_mutex);
4828
4829        WARN_ON_ONCE(workqueue_freezing);
4830        workqueue_freezing = true;
4831
4832        list_for_each_entry(wq, &workqueues, list) {
4833                mutex_lock(&wq->mutex);
4834                for_each_pwq(pwq, wq)
4835                        pwq_adjust_max_active(pwq);
4836                mutex_unlock(&wq->mutex);
4837        }
4838
4839        mutex_unlock(&wq_pool_mutex);
4840}
4841
4842/**
4843 * freeze_workqueues_busy - are freezable workqueues still busy?
4844 *
4845 * Check whether freezing is complete.  This function must be called
4846 * between freeze_workqueues_begin() and thaw_workqueues().
4847 *
4848 * CONTEXT:
4849 * Grabs and releases wq_pool_mutex.
4850 *
4851 * Return:
4852 * %true if some freezable workqueues are still busy.  %false if freezing
4853 * is complete.
4854 */
4855bool freeze_workqueues_busy(void)
4856{
4857        bool busy = false;
4858        struct workqueue_struct *wq;
4859        struct pool_workqueue *pwq;
4860
4861        mutex_lock(&wq_pool_mutex);
4862
4863        WARN_ON_ONCE(!workqueue_freezing);
4864
4865        list_for_each_entry(wq, &workqueues, list) {
4866                if (!(wq->flags & WQ_FREEZABLE))
4867                        continue;
4868                /*
4869                 * nr_active is monotonically decreasing.  It's safe
4870                 * to peek without lock.
4871                 */
4872                rcu_read_lock_sched();
4873                for_each_pwq(pwq, wq) {
4874                        WARN_ON_ONCE(pwq->nr_active < 0);
4875                        if (pwq->nr_active) {
4876                                busy = true;
4877                                rcu_read_unlock_sched();
4878                                goto out_unlock;
4879                        }
4880                }
4881                rcu_read_unlock_sched();
4882        }
4883out_unlock:
4884        mutex_unlock(&wq_pool_mutex);
4885        return busy;
4886}
4887
4888/**
4889 * thaw_workqueues - thaw workqueues
4890 *
4891 * Thaw workqueues.  Normal queueing is restored and all collected
4892 * frozen works are transferred to their respective pool worklists.
4893 *
4894 * CONTEXT:
4895 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
4896 */
4897void thaw_workqueues(void)
4898{
4899        struct workqueue_struct *wq;
4900        struct pool_workqueue *pwq;
4901
4902        mutex_lock(&wq_pool_mutex);
4903
4904        if (!workqueue_freezing)
4905                goto out_unlock;
4906
4907        workqueue_freezing = false;
4908
4909        /* restore max_active and repopulate worklist */
4910        list_for_each_entry(wq, &workqueues, list) {
4911                mutex_lock(&wq->mutex);
4912                for_each_pwq(pwq, wq)
4913                        pwq_adjust_max_active(pwq);
4914                mutex_unlock(&wq->mutex);
4915        }
4916
4917out_unlock:
4918        mutex_unlock(&wq_pool_mutex);
4919}
4920#endif /* CONFIG_FREEZER */
4921
4922static int workqueue_apply_unbound_cpumask(void)
4923{
4924        LIST_HEAD(ctxs);
4925        int ret = 0;
4926        struct workqueue_struct *wq;
4927        struct apply_wqattrs_ctx *ctx, *n;
4928
4929        lockdep_assert_held(&wq_pool_mutex);
4930
4931        list_for_each_entry(wq, &workqueues, list) {
4932                if (!(wq->flags & WQ_UNBOUND))
4933                        continue;
4934                /* creating multiple pwqs breaks ordering guarantee */
4935                if (wq->flags & __WQ_ORDERED)
4936                        continue;
4937
4938                ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs);
4939                if (!ctx) {
4940                        ret = -ENOMEM;
4941                        break;
4942                }
4943
4944                list_add_tail(&ctx->list, &ctxs);
4945        }
4946
4947        list_for_each_entry_safe(ctx, n, &ctxs, list) {
4948                if (!ret)
4949                        apply_wqattrs_commit(ctx);
4950                apply_wqattrs_cleanup(ctx);
4951        }
4952
4953        return ret;
4954}
4955
4956/**
4957 *  workqueue_set_unbound_cpumask - Set the low-level unbound cpumask
4958 *  @cpumask: the cpumask to set
4959 *
4960 *  The low-level workqueues cpumask is a global cpumask that limits
4961 *  the affinity of all unbound workqueues.  This function check the @cpumask
4962 *  and apply it to all unbound workqueues and updates all pwqs of them.
4963 *
4964 *  Retun:      0       - Success
4965 *              -EINVAL - Invalid @cpumask
4966 *              -ENOMEM - Failed to allocate memory for attrs or pwqs.
4967 */
4968int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
4969{
4970        int ret = -EINVAL;
4971        cpumask_var_t saved_cpumask;
4972
4973        if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL))
4974                return -ENOMEM;
4975
4976        cpumask_and(cpumask, cpumask, cpu_possible_mask);
4977        if (!cpumask_empty(cpumask)) {
4978                apply_wqattrs_lock();
4979
4980                /* save the old wq_unbound_cpumask. */
4981                cpumask_copy(saved_cpumask, wq_unbound_cpumask);
4982
4983                /* update wq_unbound_cpumask at first and apply it to wqs. */
4984                cpumask_copy(wq_unbound_cpumask, cpumask);
4985                ret = workqueue_apply_unbound_cpumask();
4986
4987                /* restore the wq_unbound_cpumask when failed. */
4988                if (ret < 0)
4989                        cpumask_copy(wq_unbound_cpumask, saved_cpumask);
4990
4991                apply_wqattrs_unlock();
4992        }
4993
4994        free_cpumask_var(saved_cpumask);
4995        return ret;
4996}
4997
4998#ifdef CONFIG_SYSFS
4999/*
5000 * Workqueues with WQ_SYSFS flag set is visible to userland via
5001 * /sys/bus/workqueue/devices/WQ_NAME.  All visible workqueues have the
5002 * following attributes.
5003 *
5004 *  per_cpu     RO bool : whether the workqueue is per-cpu or unbound
5005 *  max_active  RW int  : maximum number of in-flight work items
5006 *
5007 * Unbound workqueues have the following extra attributes.
5008 *
5009 *  id          RO int  : the associated pool ID
5010 *  nice        RW int  : nice value of the workers
5011 *  cpumask     RW mask : bitmask of allowed CPUs for the workers
5012 */
5013struct wq_device {
5014        struct workqueue_struct         *wq;
5015        struct device                   dev;
5016};
5017
5018static struct workqueue_struct *dev_to_wq(struct device *dev)
5019{
5020        struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
5021
5022        return wq_dev->wq;
5023}
5024
5025static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr,
5026                            char *buf)
5027{
5028        struct workqueue_struct *wq = dev_to_wq(dev);
5029
5030        return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
5031}
5032static DEVICE_ATTR_RO(per_cpu);
5033
5034static ssize_t max_active_show(struct device *dev,
5035                               struct device_attribute *attr, char *buf)
5036{
5037        struct workqueue_struct *wq = dev_to_wq(dev);
5038
5039        return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
5040}
5041
5042static ssize_t max_active_store(struct device *dev,
5043                                struct device_attribute *attr, const char *buf,
5044                                size_t count)
5045{
5046        struct workqueue_struct *wq = dev_to_wq(dev);
5047        int val;
5048
5049        if (sscanf(buf, "%d", &val) != 1 || val <= 0)
5050                return -EINVAL;
5051
5052        workqueue_set_max_active(wq, val);
5053        return count;
5054}
5055static DEVICE_ATTR_RW(max_active);
5056
5057static struct attribute *wq_sysfs_attrs[] = {
5058        &dev_attr_per_cpu.attr,
5059        &dev_attr_max_active.attr,
5060        NULL,
5061};
5062ATTRIBUTE_GROUPS(wq_sysfs);
5063
5064static ssize_t wq_pool_ids_show(struct device *dev,
5065                                struct device_attribute *attr, char *buf)
5066{
5067        struct workqueue_struct *wq = dev_to_wq(dev);
5068        const char *delim = "";
5069        int node, written = 0;
5070
5071        rcu_read_lock_sched();
5072        for_each_node(node) {
5073                written += scnprintf(buf + written, PAGE_SIZE - written,
5074                                     "%s%d:%d", delim, node,
5075                                     unbound_pwq_by_node(wq, node)->pool->id);
5076                delim = " ";
5077        }
5078        written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
5079        rcu_read_unlock_sched();
5080
5081        return written;
5082}
5083
5084static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
5085                            char *buf)
5086{
5087        struct workqueue_struct *wq = dev_to_wq(dev);
5088        int written;
5089
5090        mutex_lock(&wq->mutex);
5091        written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice);
5092        mutex_unlock(&wq->mutex);
5093
5094        return written;
5095}
5096
5097/* prepare workqueue_attrs for sysfs store operations */
5098static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
5099{
5100        struct workqueue_attrs *attrs;
5101
5102        lockdep_assert_held(&wq_pool_mutex);
5103
5104        attrs = alloc_workqueue_attrs(GFP_KERNEL);
5105        if (!attrs)
5106                return NULL;
5107
5108        copy_workqueue_attrs(attrs, wq->unbound_attrs);
5109        return attrs;
5110}
5111
5112static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
5113                             const char *buf, size_t count)
5114{
5115        struct workqueue_struct *wq = dev_to_wq(dev);
5116        struct workqueue_attrs *attrs;
5117        int ret = -ENOMEM;
5118
5119        apply_wqattrs_lock();
5120
5121        attrs = wq_sysfs_prep_attrs(wq);
5122        if (!attrs)
5123                goto out_unlock;
5124
5125        if (sscanf(buf, "%d", &attrs->nice) == 1 &&
5126            attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE)
5127                ret = apply_workqueue_attrs_locked(wq, attrs);
5128        else
5129                ret = -EINVAL;
5130
5131out_unlock:
5132        apply_wqattrs_unlock();
5133        free_workqueue_attrs(attrs);
5134        return ret ?: count;
5135}
5136
5137static ssize_t wq_cpumask_show(struct device *dev,
5138                               struct device_attribute *attr, char *buf)
5139{
5140        struct workqueue_struct *wq = dev_to_wq(dev);
5141        int written;
5142
5143        mutex_lock(&wq->mutex);
5144        written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
5145                            cpumask_pr_args(wq->unbound_attrs->cpumask));
5146        mutex_unlock(&wq->mutex);
5147        return written;
5148}
5149
5150static ssize_t wq_cpumask_store(struct device *dev,
5151                                struct device_attribute *attr,
5152                                const char *buf, size_t count)
5153{
5154        struct workqueue_struct *wq = dev_to_wq(dev);
5155        struct workqueue_attrs *attrs;
5156        int ret = -ENOMEM;
5157
5158        apply_wqattrs_lock();
5159
5160        attrs = wq_sysfs_prep_attrs(wq);
5161        if (!attrs)
5162                goto out_unlock;
5163
5164        ret = cpumask_parse(buf, attrs->cpumask);
5165        if (!ret)
5166                ret = apply_workqueue_attrs_locked(wq, attrs);
5167
5168out_unlock:
5169        apply_wqattrs_unlock();
5170        free_workqueue_attrs(attrs);
5171        return ret ?: count;
5172}
5173
5174static ssize_t wq_numa_show(struct device *dev, struct device_attribute *attr,
5175                            char *buf)
5176{
5177        struct workqueue_struct *wq = dev_to_wq(dev);
5178        int written;
5179
5180        mutex_lock(&wq->mutex);
5181        written = scnprintf(buf, PAGE_SIZE, "%d\n",
5182                            !wq->unbound_attrs->no_numa);
5183        mutex_unlock(&wq->mutex);
5184
5185        return written;
5186}
5187
5188static ssize_t wq_numa_store(struct device *dev, struct device_attribute *attr,
5189                             const char *buf, size_t count)
5190{
5191        struct workqueue_struct *wq = dev_to_wq(dev);
5192        struct workqueue_attrs *attrs;
5193        int v, ret = -ENOMEM;
5194
5195        apply_wqattrs_lock();
5196
5197        attrs = wq_sysfs_prep_attrs(wq);
5198        if (!attrs)
5199                goto out_unlock;
5200
5201        ret = -EINVAL;
5202        if (sscanf(buf, "%d", &v) == 1) {
5203                attrs->no_numa = !v;
5204                ret = apply_workqueue_attrs_locked(wq, attrs);
5205        }
5206
5207out_unlock:
5208        apply_wqattrs_unlock();
5209        free_workqueue_attrs(attrs);
5210        return ret ?: count;
5211}
5212
5213static struct device_attribute wq_sysfs_unbound_attrs[] = {
5214        __ATTR(pool_ids, 0444, wq_pool_ids_show, NULL),
5215        __ATTR(nice, 0644, wq_nice_show, wq_nice_store),
5216        __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
5217        __ATTR(numa, 0644, wq_numa_show, wq_numa_store),
5218        __ATTR_NULL,
5219};
5220
5221static struct bus_type wq_subsys = {
5222        .name                           = "workqueue",
5223        .dev_groups                     = wq_sysfs_groups,
5224};
5225
5226static ssize_t wq_unbound_cpumask_show(struct device *dev,
5227                struct device_attribute *attr, char *buf)
5228{
5229        int written;
5230
5231        mutex_lock(&wq_pool_mutex);
5232        written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
5233                            cpumask_pr_args(wq_unbound_cpumask));
5234        mutex_unlock(&wq_pool_mutex);
5235
5236        return written;
5237}
5238
5239static ssize_t wq_unbound_cpumask_store(struct device *dev,
5240                struct device_attribute *attr, const char *buf, size_t count)
5241{
5242        cpumask_var_t cpumask;
5243        int ret;
5244
5245        if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
5246                return -ENOMEM;
5247
5248        ret = cpumask_parse(buf, cpumask);
5249        if (!ret)
5250                ret = workqueue_set_unbound_cpumask(cpumask);
5251
5252        free_cpumask_var(cpumask);
5253        return ret ? ret : count;
5254}
5255
5256static struct device_attribute wq_sysfs_cpumask_attr =
5257        __ATTR(cpumask, 0644, wq_unbound_cpumask_show,
5258               wq_unbound_cpumask_store);
5259
5260static int __init wq_sysfs_init(void)
5261{
5262        int err;
5263
5264        err = subsys_virtual_register(&wq_subsys, NULL);
5265        if (err)
5266                return err;
5267
5268        return device_create_file(wq_subsys.dev_root, &wq_sysfs_cpumask_attr);
5269}
5270core_initcall(wq_sysfs_init);
5271
5272static void wq_device_release(struct device *dev)
5273{
5274        struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
5275
5276        kfree(wq_dev);
5277}
5278
5279/**
5280 * workqueue_sysfs_register - make a workqueue visible in sysfs
5281 * @wq: the workqueue to register
5282 *
5283 * Expose @wq in sysfs under /sys/bus/workqueue/devices.
5284 * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set
5285 * which is the preferred method.
5286 *
5287 * Workqueue user should use this function directly iff it wants to apply
5288 * workqueue_attrs before making the workqueue visible in sysfs; otherwise,
5289 * apply_workqueue_attrs() may race against userland updating the
5290 * attributes.
5291 *
5292 * Return: 0 on success, -errno on failure.
5293 */
5294int workqueue_sysfs_register(struct workqueue_struct *wq)
5295{
5296        struct wq_device *wq_dev;
5297        int ret;
5298
5299        /*
5300         * Adjusting max_active or creating new pwqs by applying
5301         * attributes breaks ordering guarantee.  Disallow exposing ordered
5302         * workqueues.
5303         */
5304        if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
5305                return -EINVAL;
5306
5307        wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
5308        if (!wq_dev)
5309                return -ENOMEM;
5310
5311        wq_dev->wq = wq;
5312        wq_dev->dev.bus = &wq_subsys;
5313        wq_dev->dev.release = wq_device_release;
5314        dev_set_name(&wq_dev->dev, "%s", wq->name);
5315
5316        /*
5317         * unbound_attrs are created separately.  Suppress uevent until
5318         * everything is ready.
5319         */
5320        dev_set_uevent_suppress(&wq_dev->dev, true);
5321
5322        ret = device_register(&wq_dev->dev);
5323        if (ret) {
5324                kfree(wq_dev);
5325                wq->wq_dev = NULL;
5326                return ret;
5327        }
5328
5329        if (wq->flags & WQ_UNBOUND) {
5330                struct device_attribute *attr;
5331
5332                for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) {
5333                        ret = device_create_file(&wq_dev->dev, attr);
5334                        if (ret) {
5335                                device_unregister(&wq_dev->dev);
5336                                wq->wq_dev = NULL;
5337                                return ret;
5338                        }
5339                }
5340        }
5341
5342        dev_set_uevent_suppress(&wq_dev->dev, false);
5343        kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
5344        return 0;
5345}
5346
5347/**
5348 * workqueue_sysfs_unregister - undo workqueue_sysfs_register()
5349 * @wq: the workqueue to unregister
5350 *
5351 * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister.
5352 */
5353static void workqueue_sysfs_unregister(struct workqueue_struct *wq)
5354{
5355        struct wq_device *wq_dev = wq->wq_dev;
5356
5357        if (!wq->wq_dev)
5358                return;
5359
5360        wq->wq_dev = NULL;
5361        device_unregister(&wq_dev->dev);
5362}
5363#else   /* CONFIG_SYSFS */
5364static void workqueue_sysfs_unregister(struct workqueue_struct *wq)     { }
5365#endif  /* CONFIG_SYSFS */
5366
5367/*
5368 * Workqueue watchdog.
5369 *
5370 * Stall may be caused by various bugs - missing WQ_MEM_RECLAIM, illegal
5371 * flush dependency, a concurrency managed work item which stays RUNNING
5372 * indefinitely.  Workqueue stalls can be very difficult to debug as the
5373 * usual warning mechanisms don't trigger and internal workqueue state is
5374 * largely opaque.
5375 *
5376 * Workqueue watchdog monitors all worker pools periodically and dumps
5377 * state if some pools failed to make forward progress for a while where
5378 * forward progress is defined as the first item on ->worklist changing.
5379 *
5380 * This mechanism is controlled through the kernel parameter
5381 * "workqueue.watchdog_thresh" which can be updated at runtime through the
5382 * corresponding sysfs parameter file.
5383 */
5384#ifdef CONFIG_WQ_WATCHDOG
5385
5386static void wq_watchdog_timer_fn(unsigned long data);
5387
5388static unsigned long wq_watchdog_thresh = 30;
5389static struct timer_list wq_watchdog_timer =
5390        TIMER_DEFERRED_INITIALIZER(wq_watchdog_timer_fn, 0, 0);
5391
5392static unsigned long wq_watchdog_touched = INITIAL_JIFFIES;
5393static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;
5394
5395static void wq_watchdog_reset_touched(void)
5396{
5397        int cpu;
5398
5399        wq_watchdog_touched = jiffies;
5400        for_each_possible_cpu(cpu)
5401                per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
5402}
5403
5404static void wq_watchdog_timer_fn(unsigned long data)
5405{
5406        unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
5407        bool lockup_detected = false;
5408        struct worker_pool *pool;
5409        int pi;
5410
5411        if (!thresh)
5412                return;
5413
5414        rcu_read_lock();
5415
5416        for_each_pool(pool, pi) {
5417                unsigned long pool_ts, touched, ts;
5418
5419                if (list_empty(&pool->worklist))
5420                        continue;
5421
5422                /* get the latest of pool and touched timestamps */
5423                pool_ts = READ_ONCE(pool->watchdog_ts);
5424                touched = READ_ONCE(wq_watchdog_touched);
5425
5426                if (time_after(pool_ts, touched))
5427                        ts = pool_ts;
5428                else
5429                        ts = touched;
5430
5431                if (pool->cpu >= 0) {
5432                        unsigned long cpu_touched =
5433                                READ_ONCE(per_cpu(wq_watchdog_touched_cpu,
5434                                                  pool->cpu));
5435                        if (time_after(cpu_touched, ts))
5436                                ts = cpu_touched;
5437                }
5438
5439                /* did we stall? */
5440                if (time_after(jiffies, ts + thresh)) {
5441                        lockup_detected = true;
5442                        pr_emerg("BUG: workqueue lockup - pool");
5443                        pr_cont_pool_info(pool);
5444                        pr_cont(" stuck for %us!\n",
5445                                jiffies_to_msecs(jiffies - pool_ts) / 1000);
5446                }
5447        }
5448
5449        rcu_read_unlock();
5450
5451        if (lockup_detected)
5452                show_workqueue_state();
5453
5454        wq_watchdog_reset_touched();
5455        mod_timer(&wq_watchdog_timer, jiffies + thresh);
5456}
5457
5458void wq_watchdog_touch(int cpu)
5459{
5460        if (cpu >= 0)
5461                per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
5462        else
5463                wq_watchdog_touched = jiffies;
5464}
5465
5466static void wq_watchdog_set_thresh(unsigned long thresh)
5467{
5468        wq_watchdog_thresh = 0;
5469        del_timer_sync(&wq_watchdog_timer);
5470
5471        if (thresh) {
5472                wq_watchdog_thresh = thresh;
5473                wq_watchdog_reset_touched();
5474                mod_timer(&wq_watchdog_timer, jiffies + thresh * HZ);
5475        }
5476}
5477
5478static int wq_watchdog_param_set_thresh(const char *val,
5479                                        const struct kernel_param *kp)
5480{
5481        unsigned long thresh;
5482        int ret;
5483
5484        ret = kstrtoul(val, 0, &thresh);
5485        if (ret)
5486                return ret;
5487
5488        if (system_wq)
5489                wq_watchdog_set_thresh(thresh);
5490        else
5491                wq_watchdog_thresh = thresh;
5492
5493        return 0;
5494}
5495
5496static const struct kernel_param_ops wq_watchdog_thresh_ops = {
5497        .set    = wq_watchdog_param_set_thresh,
5498        .get    = param_get_ulong,
5499};
5500
5501module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh,
5502                0644);
5503
5504static void wq_watchdog_init(void)
5505{
5506        wq_watchdog_set_thresh(wq_watchdog_thresh);
5507}
5508
5509#else   /* CONFIG_WQ_WATCHDOG */
5510
5511static inline void wq_watchdog_init(void) { }
5512
5513#endif  /* CONFIG_WQ_WATCHDOG */
5514
5515static void __init wq_numa_init(void)
5516{
5517        cpumask_var_t *tbl;
5518        int node, cpu;
5519
5520        if (num_possible_nodes() <= 1)
5521                return;
5522
5523        if (wq_disable_numa) {
5524                pr_info("workqueue: NUMA affinity support disabled\n");
5525                return;
5526        }
5527
5528        wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs(GFP_KERNEL);
5529        BUG_ON(!wq_update_unbound_numa_attrs_buf);
5530
5531        /*
5532         * We want masks of possible CPUs of each node which isn't readily
5533         * available.  Build one from cpu_to_node() which should have been
5534         * fully initialized by now.
5535         */
5536        tbl = kzalloc(nr_node_ids * sizeof(tbl[0]), GFP_KERNEL);
5537        BUG_ON(!tbl);
5538
5539        for_each_node(node)
5540                BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
5541                                node_online(node) ? node : NUMA_NO_NODE));
5542
5543        for_each_possible_cpu(cpu) {
5544                node = cpu_to_node(cpu);
5545                if (WARN_ON(node == NUMA_NO_NODE)) {
5546                        pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
5547                        /* happens iff arch is bonkers, let's just proceed */
5548                        return;
5549                }
5550                cpumask_set_cpu(cpu, tbl[node]);
5551        }
5552
5553        wq_numa_possible_cpumask = tbl;
5554        wq_numa_enabled = true;
5555}
5556
5557/**
5558 * workqueue_init_early - early init for workqueue subsystem
5559 *
5560 * This is the first half of two-staged workqueue subsystem initialization
5561 * and invoked as soon as the bare basics - memory allocation, cpumasks and
5562 * idr are up.  It sets up all the data structures and system workqueues
5563 * and allows early boot code to create workqueues and queue/cancel work
5564 * items.  Actual work item execution starts only after kthreads can be
5565 * created and scheduled right before early initcalls.
5566 */
5567int __init workqueue_init_early(void)
5568{
5569        int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
5570        int i, cpu;
5571
5572        WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
5573
5574        BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
5575        cpumask_copy(wq_unbound_cpumask, cpu_possible_mask);
5576
5577        pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
5578
5579        /* initialize CPU pools */
5580        for_each_possible_cpu(cpu) {
5581                struct worker_pool *pool;
5582
5583                i = 0;
5584                for_each_cpu_worker_pool(pool, cpu) {
5585                        BUG_ON(init_worker_pool(pool));
5586                        pool->cpu = cpu;
5587                        cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
5588                        pool->attrs->nice = std_nice[i++];
5589                        pool->node = cpu_to_node(cpu);
5590
5591                        /* alloc pool ID */
5592                        mutex_lock(&wq_pool_mutex);
5593                        BUG_ON(worker_pool_assign_id(pool));
5594                        mutex_unlock(&wq_pool_mutex);
5595                }
5596        }
5597
5598        /* create default unbound and ordered wq attrs */
5599        for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
5600                struct workqueue_attrs *attrs;
5601
5602                BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
5603                attrs->nice = std_nice[i];
5604                unbound_std_wq_attrs[i] = attrs;
5605
5606                /*
5607                 * An ordered wq should have only one pwq as ordering is
5608                 * guaranteed by max_active which is enforced by pwqs.
5609                 * Turn off NUMA so that dfl_pwq is used for all nodes.
5610                 */
5611                BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
5612                attrs->nice = std_nice[i];
5613                attrs->no_numa = true;
5614                ordered_wq_attrs[i] = attrs;
5615        }
5616
5617        system_wq = alloc_workqueue("events", 0, 0);
5618        system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
5619        system_long_wq = alloc_workqueue("events_long", 0, 0);
5620        system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
5621                                            WQ_UNBOUND_MAX_ACTIVE);
5622        system_freezable_wq = alloc_workqueue("events_freezable",
5623                                              WQ_FREEZABLE, 0);
5624        system_power_efficient_wq = alloc_workqueue("events_power_efficient",
5625                                              WQ_POWER_EFFICIENT, 0);
5626        system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
5627                                              WQ_FREEZABLE | WQ_POWER_EFFICIENT,
5628                                              0);
5629        BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
5630               !system_unbound_wq || !system_freezable_wq ||
5631               !system_power_efficient_wq ||
5632               !system_freezable_power_efficient_wq);
5633
5634        return 0;
5635}
5636
5637/**
5638 * workqueue_init - bring workqueue subsystem fully online
5639 *
5640 * This is the latter half of two-staged workqueue subsystem initialization
5641 * and invoked as soon as kthreads can be created and scheduled.
5642 * Workqueues have been created and work items queued on them, but there
5643 * are no kworkers executing the work items yet.  Populate the worker pools
5644 * with the initial workers and enable future kworker creations.
5645 */
5646int __init workqueue_init(void)
5647{
5648        struct workqueue_struct *wq;
5649        struct worker_pool *pool;
5650        int cpu, bkt;
5651
5652        /*
5653         * It'd be simpler to initialize NUMA in workqueue_init_early() but
5654         * CPU to node mapping may not be available that early on some
5655         * archs such as power and arm64.  As per-cpu pools created
5656         * previously could be missing node hint and unbound pools NUMA
5657         * affinity, fix them up.
5658         */
5659        wq_numa_init();
5660
5661        mutex_lock(&wq_pool_mutex);
5662
5663        for_each_possible_cpu(cpu) {
5664                for_each_cpu_worker_pool(pool, cpu) {
5665                        pool->node = cpu_to_node(cpu);
5666                }
5667        }
5668
5669        list_for_each_entry(wq, &workqueues, list)
5670                wq_update_unbound_numa(wq, smp_processor_id(), true);
5671
5672        mutex_unlock(&wq_pool_mutex);
5673
5674        /* create the initial workers */
5675        for_each_online_cpu(cpu) {
5676                for_each_cpu_worker_pool(pool, cpu) {
5677                        pool->flags &= ~POOL_DISASSOCIATED;
5678                        BUG_ON(!create_worker(pool));
5679                }
5680        }
5681
5682        hash_for_each(unbound_pool_hash, bkt, pool, hash_node)
5683                BUG_ON(!create_worker(pool));
5684
5685        wq_online = true;
5686        wq_watchdog_init();
5687
5688        return 0;
5689}
5690