linux/kernel/workqueue.c
<<
>>
Prefs
   1/*
   2 * kernel/workqueue.c - generic async execution with shared worker pool
   3 *
   4 * Copyright (C) 2002           Ingo Molnar
   5 *
   6 *   Derived from the taskqueue/keventd code by:
   7 *     David Woodhouse <dwmw2@infradead.org>
   8 *     Andrew Morton
   9 *     Kai Petzke <wpp@marie.physik.tu-berlin.de>
  10 *     Theodore Ts'o <tytso@mit.edu>
  11 *
  12 * Made to use alloc_percpu by Christoph Lameter.
  13 *
  14 * Copyright (C) 2010           SUSE Linux Products GmbH
  15 * Copyright (C) 2010           Tejun Heo <tj@kernel.org>
  16 *
  17 * This is the generic async execution mechanism.  Work items as are
  18 * executed in process context.  The worker pool is shared and
  19 * automatically managed.  There are two worker pools for each CPU (one for
  20 * normal work items and the other for high priority ones) and some extra
  21 * pools for workqueues which are not bound to any specific CPU - the
  22 * number of these backing pools is dynamic.
  23 *
  24 * Please read Documentation/workqueue.txt for details.
  25 */
  26
  27#include <linux/export.h>
  28#include <linux/kernel.h>
  29#include <linux/sched.h>
  30#include <linux/init.h>
  31#include <linux/signal.h>
  32#include <linux/completion.h>
  33#include <linux/workqueue.h>
  34#include <linux/slab.h>
  35#include <linux/cpu.h>
  36#include <linux/notifier.h>
  37#include <linux/kthread.h>
  38#include <linux/hardirq.h>
  39#include <linux/mempolicy.h>
  40#include <linux/freezer.h>
  41#include <linux/kallsyms.h>
  42#include <linux/debug_locks.h>
  43#include <linux/lockdep.h>
  44#include <linux/idr.h>
  45#include <linux/jhash.h>
  46#include <linux/hashtable.h>
  47#include <linux/rculist.h>
  48#include <linux/nodemask.h>
  49#include <linux/moduleparam.h>
  50#include <linux/uaccess.h>
  51
  52#include "workqueue_internal.h"
  53
  54enum {
  55        /*
  56         * worker_pool flags
  57         *
  58         * A bound pool is either associated or disassociated with its CPU.
  59         * While associated (!DISASSOCIATED), all workers are bound to the
  60         * CPU and none has %WORKER_UNBOUND set and concurrency management
  61         * is in effect.
  62         *
  63         * While DISASSOCIATED, the cpu may be offline and all workers have
  64         * %WORKER_UNBOUND set and concurrency management disabled, and may
  65         * be executing on any CPU.  The pool behaves as an unbound one.
  66         *
  67         * Note that DISASSOCIATED should be flipped only while holding
  68         * attach_mutex to avoid changing binding state while
  69         * worker_attach_to_pool() is in progress.
  70         */
  71        POOL_DISASSOCIATED      = 1 << 2,       /* cpu can't serve workers */
  72
  73        /* worker flags */
  74        WORKER_DIE              = 1 << 1,       /* die die die */
  75        WORKER_IDLE             = 1 << 2,       /* is idle */
  76        WORKER_PREP             = 1 << 3,       /* preparing to run works */
  77        WORKER_CPU_INTENSIVE    = 1 << 6,       /* cpu intensive */
  78        WORKER_UNBOUND          = 1 << 7,       /* worker is unbound */
  79        WORKER_REBOUND          = 1 << 8,       /* worker was rebound */
  80
  81        WORKER_NOT_RUNNING      = WORKER_PREP | WORKER_CPU_INTENSIVE |
  82                                  WORKER_UNBOUND | WORKER_REBOUND,
  83
  84        NR_STD_WORKER_POOLS     = 2,            /* # standard pools per cpu */
  85
  86        UNBOUND_POOL_HASH_ORDER = 6,            /* hashed by pool->attrs */
  87        BUSY_WORKER_HASH_ORDER  = 6,            /* 64 pointers */
  88
  89        MAX_IDLE_WORKERS_RATIO  = 4,            /* 1/4 of busy can be idle */
  90        IDLE_WORKER_TIMEOUT     = 300 * HZ,     /* keep idle ones for 5 mins */
  91
  92        MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
  93                                                /* call for help after 10ms
  94                                                   (min two ticks) */
  95        MAYDAY_INTERVAL         = HZ / 10,      /* and then every 100ms */
  96        CREATE_COOLDOWN         = HZ,           /* time to breath after fail */
  97
  98        /*
  99         * Rescue workers are used only on emergencies and shared by
 100         * all cpus.  Give MIN_NICE.
 101         */
 102        RESCUER_NICE_LEVEL      = MIN_NICE,
 103        HIGHPRI_NICE_LEVEL      = MIN_NICE,
 104
 105        WQ_NAME_LEN             = 24,
 106};
 107
 108/*
 109 * Structure fields follow one of the following exclusion rules.
 110 *
 111 * I: Modifiable by initialization/destruction paths and read-only for
 112 *    everyone else.
 113 *
 114 * P: Preemption protected.  Disabling preemption is enough and should
 115 *    only be modified and accessed from the local cpu.
 116 *
 117 * L: pool->lock protected.  Access with pool->lock held.
 118 *
 119 * X: During normal operation, modification requires pool->lock and should
 120 *    be done only from local cpu.  Either disabling preemption on local
 121 *    cpu or grabbing pool->lock is enough for read access.  If
 122 *    POOL_DISASSOCIATED is set, it's identical to L.
 123 *
 124 * A: pool->attach_mutex protected.
 125 *
 126 * PL: wq_pool_mutex protected.
 127 *
 128 * PR: wq_pool_mutex protected for writes.  Sched-RCU protected for reads.
 129 *
 130 * PW: wq_pool_mutex and wq->mutex protected for writes.  Either for reads.
 131 *
 132 * PWR: wq_pool_mutex and wq->mutex protected for writes.  Either or
 133 *      sched-RCU for reads.
 134 *
 135 * WQ: wq->mutex protected.
 136 *
 137 * WR: wq->mutex protected for writes.  Sched-RCU protected for reads.
 138 *
 139 * MD: wq_mayday_lock protected.
 140 */
 141
 142/* struct worker is defined in workqueue_internal.h */
 143
 144struct worker_pool {
 145        spinlock_t              lock;           /* the pool lock */
 146        int                     cpu;            /* I: the associated cpu */
 147        int                     node;           /* I: the associated node ID */
 148        int                     id;             /* I: pool ID */
 149        unsigned int            flags;          /* X: flags */
 150
 151        unsigned long           watchdog_ts;    /* L: watchdog timestamp */
 152
 153        struct list_head        worklist;       /* L: list of pending works */
 154        int                     nr_workers;     /* L: total number of workers */
 155
 156        /* nr_idle includes the ones off idle_list for rebinding */
 157        int                     nr_idle;        /* L: currently idle ones */
 158
 159        struct list_head        idle_list;      /* X: list of idle workers */
 160        struct timer_list       idle_timer;     /* L: worker idle timeout */
 161        struct timer_list       mayday_timer;   /* L: SOS timer for workers */
 162
 163        /* a workers is either on busy_hash or idle_list, or the manager */
 164        DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
 165                                                /* L: hash of busy workers */
 166
 167        /* see manage_workers() for details on the two manager mutexes */
 168        struct mutex            manager_arb;    /* manager arbitration */
 169        struct worker           *manager;       /* L: purely informational */
 170        struct mutex            attach_mutex;   /* attach/detach exclusion */
 171        struct list_head        workers;        /* A: attached workers */
 172        struct completion       *detach_completion; /* all workers detached */
 173
 174        struct ida              worker_ida;     /* worker IDs for task name */
 175
 176        struct workqueue_attrs  *attrs;         /* I: worker attributes */
 177        struct hlist_node       hash_node;      /* PL: unbound_pool_hash node */
 178        int                     refcnt;         /* PL: refcnt for unbound pools */
 179
 180        /*
 181         * The current concurrency level.  As it's likely to be accessed
 182         * from other CPUs during try_to_wake_up(), put it in a separate
 183         * cacheline.
 184         */
 185        atomic_t                nr_running ____cacheline_aligned_in_smp;
 186
 187        /*
 188         * Destruction of pool is sched-RCU protected to allow dereferences
 189         * from get_work_pool().
 190         */
 191        struct rcu_head         rcu;
 192} ____cacheline_aligned_in_smp;
 193
 194/*
 195 * The per-pool workqueue.  While queued, the lower WORK_STRUCT_FLAG_BITS
 196 * of work_struct->data are used for flags and the remaining high bits
 197 * point to the pwq; thus, pwqs need to be aligned at two's power of the
 198 * number of flag bits.
 199 */
 200struct pool_workqueue {
 201        struct worker_pool      *pool;          /* I: the associated pool */
 202        struct workqueue_struct *wq;            /* I: the owning workqueue */
 203        int                     work_color;     /* L: current color */
 204        int                     flush_color;    /* L: flushing color */
 205        int                     refcnt;         /* L: reference count */
 206        int                     nr_in_flight[WORK_NR_COLORS];
 207                                                /* L: nr of in_flight works */
 208        int                     nr_active;      /* L: nr of active works */
 209        int                     max_active;     /* L: max active works */
 210        struct list_head        delayed_works;  /* L: delayed works */
 211        struct list_head        pwqs_node;      /* WR: node on wq->pwqs */
 212        struct list_head        mayday_node;    /* MD: node on wq->maydays */
 213
 214        /*
 215         * Release of unbound pwq is punted to system_wq.  See put_pwq()
 216         * and pwq_unbound_release_workfn() for details.  pool_workqueue
 217         * itself is also sched-RCU protected so that the first pwq can be
 218         * determined without grabbing wq->mutex.
 219         */
 220        struct work_struct      unbound_release_work;
 221        struct rcu_head         rcu;
 222} __aligned(1 << WORK_STRUCT_FLAG_BITS);
 223
 224/*
 225 * Structure used to wait for workqueue flush.
 226 */
 227struct wq_flusher {
 228        struct list_head        list;           /* WQ: list of flushers */
 229        int                     flush_color;    /* WQ: flush color waiting for */
 230        struct completion       done;           /* flush completion */
 231};
 232
 233struct wq_device;
 234
 235/*
 236 * The externally visible workqueue.  It relays the issued work items to
 237 * the appropriate worker_pool through its pool_workqueues.
 238 */
 239struct workqueue_struct {
 240        struct list_head        pwqs;           /* WR: all pwqs of this wq */
 241        struct list_head        list;           /* PR: list of all workqueues */
 242
 243        struct mutex            mutex;          /* protects this wq */
 244        int                     work_color;     /* WQ: current work color */
 245        int                     flush_color;    /* WQ: current flush color */
 246        atomic_t                nr_pwqs_to_flush; /* flush in progress */
 247        struct wq_flusher       *first_flusher; /* WQ: first flusher */
 248        struct list_head        flusher_queue;  /* WQ: flush waiters */
 249        struct list_head        flusher_overflow; /* WQ: flush overflow list */
 250
 251        struct list_head        maydays;        /* MD: pwqs requesting rescue */
 252        struct worker           *rescuer;       /* I: rescue worker */
 253
 254        int                     nr_drainers;    /* WQ: drain in progress */
 255        int                     saved_max_active; /* WQ: saved pwq max_active */
 256
 257        struct workqueue_attrs  *unbound_attrs; /* PW: only for unbound wqs */
 258        struct pool_workqueue   *dfl_pwq;       /* PW: only for unbound wqs */
 259
 260#ifdef CONFIG_SYSFS
 261        struct wq_device        *wq_dev;        /* I: for sysfs interface */
 262#endif
 263#ifdef CONFIG_LOCKDEP
 264        struct lockdep_map      lockdep_map;
 265#endif
 266        char                    name[WQ_NAME_LEN]; /* I: workqueue name */
 267
 268        /*
 269         * Destruction of workqueue_struct is sched-RCU protected to allow
 270         * walking the workqueues list without grabbing wq_pool_mutex.
 271         * This is used to dump all workqueues from sysrq.
 272         */
 273        struct rcu_head         rcu;
 274
 275        /* hot fields used during command issue, aligned to cacheline */
 276        unsigned int            flags ____cacheline_aligned; /* WQ: WQ_* flags */
 277        struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwqs */
 278        struct pool_workqueue __rcu *numa_pwq_tbl[]; /* PWR: unbound pwqs indexed by node */
 279};
 280
 281static struct kmem_cache *pwq_cache;
 282
 283static cpumask_var_t *wq_numa_possible_cpumask;
 284                                        /* possible CPUs of each node */
 285
 286static bool wq_disable_numa;
 287module_param_named(disable_numa, wq_disable_numa, bool, 0444);
 288
 289/* see the comment above the definition of WQ_POWER_EFFICIENT */
 290static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
 291module_param_named(power_efficient, wq_power_efficient, bool, 0444);
 292
 293static bool wq_numa_enabled;            /* unbound NUMA affinity enabled */
 294
 295/* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
 296static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
 297
 298static DEFINE_MUTEX(wq_pool_mutex);     /* protects pools and workqueues list */
 299static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
 300
 301static LIST_HEAD(workqueues);           /* PR: list of all workqueues */
 302static bool workqueue_freezing;         /* PL: have wqs started freezing? */
 303
 304/* PL: allowable cpus for unbound wqs and work items */
 305static cpumask_var_t wq_unbound_cpumask;
 306
 307/* CPU where unbound work was last round robin scheduled from this CPU */
 308static DEFINE_PER_CPU(int, wq_rr_cpu_last);
 309
 310/*
 311 * Local execution of unbound work items is no longer guaranteed.  The
 312 * following always forces round-robin CPU selection on unbound work items
 313 * to uncover usages which depend on it.
 314 */
 315#ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
 316static bool wq_debug_force_rr_cpu = true;
 317#else
 318static bool wq_debug_force_rr_cpu = false;
 319#endif
 320module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);
 321
 322/* the per-cpu worker pools */
 323static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools);
 324
 325static DEFINE_IDR(worker_pool_idr);     /* PR: idr of all pools */
 326
 327/* PL: hash of all unbound pools keyed by pool->attrs */
 328static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
 329
 330/* I: attributes used when instantiating standard unbound pools on demand */
 331static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
 332
 333/* I: attributes used when instantiating ordered pools on demand */
 334static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
 335
 336struct workqueue_struct *system_wq __read_mostly;
 337EXPORT_SYMBOL(system_wq);
 338struct workqueue_struct *system_highpri_wq __read_mostly;
 339EXPORT_SYMBOL_GPL(system_highpri_wq);
 340struct workqueue_struct *system_long_wq __read_mostly;
 341EXPORT_SYMBOL_GPL(system_long_wq);
 342struct workqueue_struct *system_unbound_wq __read_mostly;
 343EXPORT_SYMBOL_GPL(system_unbound_wq);
 344struct workqueue_struct *system_freezable_wq __read_mostly;
 345EXPORT_SYMBOL_GPL(system_freezable_wq);
 346struct workqueue_struct *system_power_efficient_wq __read_mostly;
 347EXPORT_SYMBOL_GPL(system_power_efficient_wq);
 348struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
 349EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
 350
 351static int worker_thread(void *__worker);
 352static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
 353
 354#define CREATE_TRACE_POINTS
 355#include <trace/events/workqueue.h>
 356
 357#define assert_rcu_or_pool_mutex()                                      \
 358        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() &&                 \
 359                         !lockdep_is_held(&wq_pool_mutex),              \
 360                         "sched RCU or wq_pool_mutex should be held")
 361
 362#define assert_rcu_or_wq_mutex(wq)                                      \
 363        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() &&                 \
 364                         !lockdep_is_held(&wq->mutex),                  \
 365                         "sched RCU or wq->mutex should be held")
 366
 367#define assert_rcu_or_wq_mutex_or_pool_mutex(wq)                        \
 368        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() &&                 \
 369                         !lockdep_is_held(&wq->mutex) &&                \
 370                         !lockdep_is_held(&wq_pool_mutex),              \
 371                         "sched RCU, wq->mutex or wq_pool_mutex should be held")
 372
 373#define for_each_cpu_worker_pool(pool, cpu)                             \
 374        for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];               \
 375             (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
 376             (pool)++)
 377
 378/**
 379 * for_each_pool - iterate through all worker_pools in the system
 380 * @pool: iteration cursor
 381 * @pi: integer used for iteration
 382 *
 383 * This must be called either with wq_pool_mutex held or sched RCU read
 384 * locked.  If the pool needs to be used beyond the locking in effect, the
 385 * caller is responsible for guaranteeing that the pool stays online.
 386 *
 387 * The if/else clause exists only for the lockdep assertion and can be
 388 * ignored.
 389 */
 390#define for_each_pool(pool, pi)                                         \
 391        idr_for_each_entry(&worker_pool_idr, pool, pi)                  \
 392                if (({ assert_rcu_or_pool_mutex(); false; })) { }       \
 393                else
 394
 395/**
 396 * for_each_pool_worker - iterate through all workers of a worker_pool
 397 * @worker: iteration cursor
 398 * @pool: worker_pool to iterate workers of
 399 *
 400 * This must be called with @pool->attach_mutex.
 401 *
 402 * The if/else clause exists only for the lockdep assertion and can be
 403 * ignored.
 404 */
 405#define for_each_pool_worker(worker, pool)                              \
 406        list_for_each_entry((worker), &(pool)->workers, node)           \
 407                if (({ lockdep_assert_held(&pool->attach_mutex); false; })) { } \
 408                else
 409
 410/**
 411 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
 412 * @pwq: iteration cursor
 413 * @wq: the target workqueue
 414 *
 415 * This must be called either with wq->mutex held or sched RCU read locked.
 416 * If the pwq needs to be used beyond the locking in effect, the caller is
 417 * responsible for guaranteeing that the pwq stays online.
 418 *
 419 * The if/else clause exists only for the lockdep assertion and can be
 420 * ignored.
 421 */
 422#define for_each_pwq(pwq, wq)                                           \
 423        list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node)          \
 424                if (({ assert_rcu_or_wq_mutex(wq); false; })) { }       \
 425                else
 426
 427#ifdef CONFIG_DEBUG_OBJECTS_WORK
 428
 429static struct debug_obj_descr work_debug_descr;
 430
 431static void *work_debug_hint(void *addr)
 432{
 433        return ((struct work_struct *) addr)->func;
 434}
 435
 436/*
 437 * fixup_init is called when:
 438 * - an active object is initialized
 439 */
 440static int work_fixup_init(void *addr, enum debug_obj_state state)
 441{
 442        struct work_struct *work = addr;
 443
 444        switch (state) {
 445        case ODEBUG_STATE_ACTIVE:
 446                cancel_work_sync(work);
 447                debug_object_init(work, &work_debug_descr);
 448                return 1;
 449        default:
 450                return 0;
 451        }
 452}
 453
 454/*
 455 * fixup_activate is called when:
 456 * - an active object is activated
 457 * - an unknown object is activated (might be a statically initialized object)
 458 */
 459static int work_fixup_activate(void *addr, enum debug_obj_state state)
 460{
 461        struct work_struct *work = addr;
 462
 463        switch (state) {
 464
 465        case ODEBUG_STATE_NOTAVAILABLE:
 466                /*
 467                 * This is not really a fixup. The work struct was
 468                 * statically initialized. We just make sure that it
 469                 * is tracked in the object tracker.
 470                 */
 471                if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
 472                        debug_object_init(work, &work_debug_descr);
 473                        debug_object_activate(work, &work_debug_descr);
 474                        return 0;
 475                }
 476                WARN_ON_ONCE(1);
 477                return 0;
 478
 479        case ODEBUG_STATE_ACTIVE:
 480                WARN_ON(1);
 481
 482        default:
 483                return 0;
 484        }
 485}
 486
 487/*
 488 * fixup_free is called when:
 489 * - an active object is freed
 490 */
 491static int work_fixup_free(void *addr, enum debug_obj_state state)
 492{
 493        struct work_struct *work = addr;
 494
 495        switch (state) {
 496        case ODEBUG_STATE_ACTIVE:
 497                cancel_work_sync(work);
 498                debug_object_free(work, &work_debug_descr);
 499                return 1;
 500        default:
 501                return 0;
 502        }
 503}
 504
 505static struct debug_obj_descr work_debug_descr = {
 506        .name           = "work_struct",
 507        .debug_hint     = work_debug_hint,
 508        .fixup_init     = work_fixup_init,
 509        .fixup_activate = work_fixup_activate,
 510        .fixup_free     = work_fixup_free,
 511};
 512
 513static inline void debug_work_activate(struct work_struct *work)
 514{
 515        debug_object_activate(work, &work_debug_descr);
 516}
 517
 518static inline void debug_work_deactivate(struct work_struct *work)
 519{
 520        debug_object_deactivate(work, &work_debug_descr);
 521}
 522
 523void __init_work(struct work_struct *work, int onstack)
 524{
 525        if (onstack)
 526                debug_object_init_on_stack(work, &work_debug_descr);
 527        else
 528                debug_object_init(work, &work_debug_descr);
 529}
 530EXPORT_SYMBOL_GPL(__init_work);
 531
 532void destroy_work_on_stack(struct work_struct *work)
 533{
 534        debug_object_free(work, &work_debug_descr);
 535}
 536EXPORT_SYMBOL_GPL(destroy_work_on_stack);
 537
 538void destroy_delayed_work_on_stack(struct delayed_work *work)
 539{
 540        destroy_timer_on_stack(&work->timer);
 541        debug_object_free(&work->work, &work_debug_descr);
 542}
 543EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);
 544
 545#else
 546static inline void debug_work_activate(struct work_struct *work) { }
 547static inline void debug_work_deactivate(struct work_struct *work) { }
 548#endif
 549
 550/**
 551 * worker_pool_assign_id - allocate ID and assing it to @pool
 552 * @pool: the pool pointer of interest
 553 *
 554 * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
 555 * successfully, -errno on failure.
 556 */
 557static int worker_pool_assign_id(struct worker_pool *pool)
 558{
 559        int ret;
 560
 561        lockdep_assert_held(&wq_pool_mutex);
 562
 563        ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
 564                        GFP_KERNEL);
 565        if (ret >= 0) {
 566                pool->id = ret;
 567                return 0;
 568        }
 569        return ret;
 570}
 571
 572/**
 573 * unbound_pwq_by_node - return the unbound pool_workqueue for the given node
 574 * @wq: the target workqueue
 575 * @node: the node ID
 576 *
 577 * This must be called with any of wq_pool_mutex, wq->mutex or sched RCU
 578 * read locked.
 579 * If the pwq needs to be used beyond the locking in effect, the caller is
 580 * responsible for guaranteeing that the pwq stays online.
 581 *
 582 * Return: The unbound pool_workqueue for @node.
 583 */
 584static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
 585                                                  int node)
 586{
 587        assert_rcu_or_wq_mutex_or_pool_mutex(wq);
 588
 589        /*
 590         * XXX: @node can be NUMA_NO_NODE if CPU goes offline while a
 591         * delayed item is pending.  The plan is to keep CPU -> NODE
 592         * mapping valid and stable across CPU on/offlines.  Once that
 593         * happens, this workaround can be removed.
 594         */
 595        if (unlikely(node == NUMA_NO_NODE))
 596                return wq->dfl_pwq;
 597
 598        return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
 599}
 600
 601static unsigned int work_color_to_flags(int color)
 602{
 603        return color << WORK_STRUCT_COLOR_SHIFT;
 604}
 605
 606static int get_work_color(struct work_struct *work)
 607{
 608        return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
 609                ((1 << WORK_STRUCT_COLOR_BITS) - 1);
 610}
 611
 612static int work_next_color(int color)
 613{
 614        return (color + 1) % WORK_NR_COLORS;
 615}
 616
 617/*
 618 * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
 619 * contain the pointer to the queued pwq.  Once execution starts, the flag
 620 * is cleared and the high bits contain OFFQ flags and pool ID.
 621 *
 622 * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
 623 * and clear_work_data() can be used to set the pwq, pool or clear
 624 * work->data.  These functions should only be called while the work is
 625 * owned - ie. while the PENDING bit is set.
 626 *
 627 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
 628 * corresponding to a work.  Pool is available once the work has been
 629 * queued anywhere after initialization until it is sync canceled.  pwq is
 630 * available only while the work item is queued.
 631 *
 632 * %WORK_OFFQ_CANCELING is used to mark a work item which is being
 633 * canceled.  While being canceled, a work item may have its PENDING set
 634 * but stay off timer and worklist for arbitrarily long and nobody should
 635 * try to steal the PENDING bit.
 636 */
 637static inline void set_work_data(struct work_struct *work, unsigned long data,
 638                                 unsigned long flags)
 639{
 640        WARN_ON_ONCE(!work_pending(work));
 641        atomic_long_set(&work->data, data | flags | work_static(work));
 642}
 643
 644static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
 645                         unsigned long extra_flags)
 646{
 647        set_work_data(work, (unsigned long)pwq,
 648                      WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
 649}
 650
 651static void set_work_pool_and_keep_pending(struct work_struct *work,
 652                                           int pool_id)
 653{
 654        set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
 655                      WORK_STRUCT_PENDING);
 656}
 657
 658static void set_work_pool_and_clear_pending(struct work_struct *work,
 659                                            int pool_id)
 660{
 661        /*
 662         * The following wmb is paired with the implied mb in
 663         * test_and_set_bit(PENDING) and ensures all updates to @work made
 664         * here are visible to and precede any updates by the next PENDING
 665         * owner.
 666         */
 667        smp_wmb();
 668        set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
 669        /*
 670         * The following mb guarantees that previous clear of a PENDING bit
 671         * will not be reordered with any speculative LOADS or STORES from
 672         * work->current_func, which is executed afterwards.  This possible
 673         * reordering can lead to a missed execution on attempt to qeueue
 674         * the same @work.  E.g. consider this case:
 675         *
 676         *   CPU#0                         CPU#1
 677         *   ----------------------------  --------------------------------
 678         *
 679         * 1  STORE event_indicated
 680         * 2  queue_work_on() {
 681         * 3    test_and_set_bit(PENDING)
 682         * 4 }                             set_..._and_clear_pending() {
 683         * 5                                 set_work_data() # clear bit
 684         * 6                                 smp_mb()
 685         * 7                               work->current_func() {
 686         * 8                                  LOAD event_indicated
 687         *                                 }
 688         *
 689         * Without an explicit full barrier speculative LOAD on line 8 can
 690         * be executed before CPU#0 does STORE on line 1.  If that happens,
 691         * CPU#0 observes the PENDING bit is still set and new execution of
 692         * a @work is not queued in a hope, that CPU#1 will eventually
 693         * finish the queued @work.  Meanwhile CPU#1 does not see
 694         * event_indicated is set, because speculative LOAD was executed
 695         * before actual STORE.
 696         */
 697        smp_mb();
 698}
 699
 700static void clear_work_data(struct work_struct *work)
 701{
 702        smp_wmb();      /* see set_work_pool_and_clear_pending() */
 703        set_work_data(work, WORK_STRUCT_NO_POOL, 0);
 704}
 705
 706static struct pool_workqueue *get_work_pwq(struct work_struct *work)
 707{
 708        unsigned long data = atomic_long_read(&work->data);
 709
 710        if (data & WORK_STRUCT_PWQ)
 711                return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
 712        else
 713                return NULL;
 714}
 715
 716/**
 717 * get_work_pool - return the worker_pool a given work was associated with
 718 * @work: the work item of interest
 719 *
 720 * Pools are created and destroyed under wq_pool_mutex, and allows read
 721 * access under sched-RCU read lock.  As such, this function should be
 722 * called under wq_pool_mutex or with preemption disabled.
 723 *
 724 * All fields of the returned pool are accessible as long as the above
 725 * mentioned locking is in effect.  If the returned pool needs to be used
 726 * beyond the critical section, the caller is responsible for ensuring the
 727 * returned pool is and stays online.
 728 *
 729 * Return: The worker_pool @work was last associated with.  %NULL if none.
 730 */
 731static struct worker_pool *get_work_pool(struct work_struct *work)
 732{
 733        unsigned long data = atomic_long_read(&work->data);
 734        int pool_id;
 735
 736        assert_rcu_or_pool_mutex();
 737
 738        if (data & WORK_STRUCT_PWQ)
 739                return ((struct pool_workqueue *)
 740                        (data & WORK_STRUCT_WQ_DATA_MASK))->pool;
 741
 742        pool_id = data >> WORK_OFFQ_POOL_SHIFT;
 743        if (pool_id == WORK_OFFQ_POOL_NONE)
 744                return NULL;
 745
 746        return idr_find(&worker_pool_idr, pool_id);
 747}
 748
 749/**
 750 * get_work_pool_id - return the worker pool ID a given work is associated with
 751 * @work: the work item of interest
 752 *
 753 * Return: The worker_pool ID @work was last associated with.
 754 * %WORK_OFFQ_POOL_NONE if none.
 755 */
 756static int get_work_pool_id(struct work_struct *work)
 757{
 758        unsigned long data = atomic_long_read(&work->data);
 759
 760        if (data & WORK_STRUCT_PWQ)
 761                return ((struct pool_workqueue *)
 762                        (data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;
 763
 764        return data >> WORK_OFFQ_POOL_SHIFT;
 765}
 766
 767static void mark_work_canceling(struct work_struct *work)
 768{
 769        unsigned long pool_id = get_work_pool_id(work);
 770
 771        pool_id <<= WORK_OFFQ_POOL_SHIFT;
 772        set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
 773}
 774
 775static bool work_is_canceling(struct work_struct *work)
 776{
 777        unsigned long data = atomic_long_read(&work->data);
 778
 779        return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
 780}
 781
 782/*
 783 * Policy functions.  These define the policies on how the global worker
 784 * pools are managed.  Unless noted otherwise, these functions assume that
 785 * they're being called with pool->lock held.
 786 */
 787
 788static bool __need_more_worker(struct worker_pool *pool)
 789{
 790        return !atomic_read(&pool->nr_running);
 791}
 792
 793/*
 794 * Need to wake up a worker?  Called from anything but currently
 795 * running workers.
 796 *
 797 * Note that, because unbound workers never contribute to nr_running, this
 798 * function will always return %true for unbound pools as long as the
 799 * worklist isn't empty.
 800 */
 801static bool need_more_worker(struct worker_pool *pool)
 802{
 803        return !list_empty(&pool->worklist) && __need_more_worker(pool);
 804}
 805
 806/* Can I start working?  Called from busy but !running workers. */
 807static bool may_start_working(struct worker_pool *pool)
 808{
 809        return pool->nr_idle;
 810}
 811
 812/* Do I need to keep working?  Called from currently running workers. */
 813static bool keep_working(struct worker_pool *pool)
 814{
 815        return !list_empty(&pool->worklist) &&
 816                atomic_read(&pool->nr_running) <= 1;
 817}
 818
 819/* Do we need a new worker?  Called from manager. */
 820static bool need_to_create_worker(struct worker_pool *pool)
 821{
 822        return need_more_worker(pool) && !may_start_working(pool);
 823}
 824
 825/* Do we have too many workers and should some go away? */
 826static bool too_many_workers(struct worker_pool *pool)
 827{
 828        bool managing = mutex_is_locked(&pool->manager_arb);
 829        int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
 830        int nr_busy = pool->nr_workers - nr_idle;
 831
 832        return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
 833}
 834
 835/*
 836 * Wake up functions.
 837 */
 838
 839/* Return the first idle worker.  Safe with preemption disabled */
 840static struct worker *first_idle_worker(struct worker_pool *pool)
 841{
 842        if (unlikely(list_empty(&pool->idle_list)))
 843                return NULL;
 844
 845        return list_first_entry(&pool->idle_list, struct worker, entry);
 846}
 847
 848/**
 849 * wake_up_worker - wake up an idle worker
 850 * @pool: worker pool to wake worker from
 851 *
 852 * Wake up the first idle worker of @pool.
 853 *
 854 * CONTEXT:
 855 * spin_lock_irq(pool->lock).
 856 */
 857static void wake_up_worker(struct worker_pool *pool)
 858{
 859        struct worker *worker = first_idle_worker(pool);
 860
 861        if (likely(worker))
 862                wake_up_process(worker->task);
 863}
 864
 865/**
 866 * wq_worker_waking_up - a worker is waking up
 867 * @task: task waking up
 868 * @cpu: CPU @task is waking up to
 869 *
 870 * This function is called during try_to_wake_up() when a worker is
 871 * being awoken.
 872 *
 873 * CONTEXT:
 874 * spin_lock_irq(rq->lock)
 875 */
 876void wq_worker_waking_up(struct task_struct *task, int cpu)
 877{
 878        struct worker *worker = kthread_data(task);
 879
 880        if (!(worker->flags & WORKER_NOT_RUNNING)) {
 881                WARN_ON_ONCE(worker->pool->cpu != cpu);
 882                atomic_inc(&worker->pool->nr_running);
 883        }
 884}
 885
 886/**
 887 * wq_worker_sleeping - a worker is going to sleep
 888 * @task: task going to sleep
 889 *
 890 * This function is called during schedule() when a busy worker is
 891 * going to sleep.  Worker on the same cpu can be woken up by
 892 * returning pointer to its task.
 893 *
 894 * CONTEXT:
 895 * spin_lock_irq(rq->lock)
 896 *
 897 * Return:
 898 * Worker task on @cpu to wake up, %NULL if none.
 899 */
 900struct task_struct *wq_worker_sleeping(struct task_struct *task)
 901{
 902        struct worker *worker = kthread_data(task), *to_wakeup = NULL;
 903        struct worker_pool *pool;
 904
 905        /*
 906         * Rescuers, which may not have all the fields set up like normal
 907         * workers, also reach here, let's not access anything before
 908         * checking NOT_RUNNING.
 909         */
 910        if (worker->flags & WORKER_NOT_RUNNING)
 911                return NULL;
 912
 913        pool = worker->pool;
 914
 915        /* this can only happen on the local cpu */
 916        if (WARN_ON_ONCE(pool->cpu != raw_smp_processor_id()))
 917                return NULL;
 918
 919        /*
 920         * The counterpart of the following dec_and_test, implied mb,
 921         * worklist not empty test sequence is in insert_work().
 922         * Please read comment there.
 923         *
 924         * NOT_RUNNING is clear.  This means that we're bound to and
 925         * running on the local cpu w/ rq lock held and preemption
 926         * disabled, which in turn means that none else could be
 927         * manipulating idle_list, so dereferencing idle_list without pool
 928         * lock is safe.
 929         */
 930        if (atomic_dec_and_test(&pool->nr_running) &&
 931            !list_empty(&pool->worklist))
 932                to_wakeup = first_idle_worker(pool);
 933        return to_wakeup ? to_wakeup->task : NULL;
 934}
 935
 936/**
 937 * worker_set_flags - set worker flags and adjust nr_running accordingly
 938 * @worker: self
 939 * @flags: flags to set
 940 *
 941 * Set @flags in @worker->flags and adjust nr_running accordingly.
 942 *
 943 * CONTEXT:
 944 * spin_lock_irq(pool->lock)
 945 */
 946static inline void worker_set_flags(struct worker *worker, unsigned int flags)
 947{
 948        struct worker_pool *pool = worker->pool;
 949
 950        WARN_ON_ONCE(worker->task != current);
 951
 952        /* If transitioning into NOT_RUNNING, adjust nr_running. */
 953        if ((flags & WORKER_NOT_RUNNING) &&
 954            !(worker->flags & WORKER_NOT_RUNNING)) {
 955                atomic_dec(&pool->nr_running);
 956        }
 957
 958        worker->flags |= flags;
 959}
 960
 961/**
 962 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
 963 * @worker: self
 964 * @flags: flags to clear
 965 *
 966 * Clear @flags in @worker->flags and adjust nr_running accordingly.
 967 *
 968 * CONTEXT:
 969 * spin_lock_irq(pool->lock)
 970 */
 971static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
 972{
 973        struct worker_pool *pool = worker->pool;
 974        unsigned int oflags = worker->flags;
 975
 976        WARN_ON_ONCE(worker->task != current);
 977
 978        worker->flags &= ~flags;
 979
 980        /*
 981         * If transitioning out of NOT_RUNNING, increment nr_running.  Note
 982         * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is mask
 983         * of multiple flags, not a single flag.
 984         */
 985        if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
 986                if (!(worker->flags & WORKER_NOT_RUNNING))
 987                        atomic_inc(&pool->nr_running);
 988}
 989
 990/**
 991 * find_worker_executing_work - find worker which is executing a work
 992 * @pool: pool of interest
 993 * @work: work to find worker for
 994 *
 995 * Find a worker which is executing @work on @pool by searching
 996 * @pool->busy_hash which is keyed by the address of @work.  For a worker
 997 * to match, its current execution should match the address of @work and
 998 * its work function.  This is to avoid unwanted dependency between
 999 * unrelated work executions through a work item being recycled while still
1000 * being executed.
1001 *
1002 * This is a bit tricky.  A work item may be freed once its execution
1003 * starts and nothing prevents the freed area from being recycled for
1004 * another work item.  If the same work item address ends up being reused
1005 * before the original execution finishes, workqueue will identify the
1006 * recycled work item as currently executing and make it wait until the
1007 * current execution finishes, introducing an unwanted dependency.
1008 *
1009 * This function checks the work item address and work function to avoid
1010 * false positives.  Note that this isn't complete as one may construct a
1011 * work function which can introduce dependency onto itself through a
1012 * recycled work item.  Well, if somebody wants to shoot oneself in the
1013 * foot that badly, there's only so much we can do, and if such deadlock
1014 * actually occurs, it should be easy to locate the culprit work function.
1015 *
1016 * CONTEXT:
1017 * spin_lock_irq(pool->lock).
1018 *
1019 * Return:
1020 * Pointer to worker which is executing @work if found, %NULL
1021 * otherwise.
1022 */
1023static struct worker *find_worker_executing_work(struct worker_pool *pool,
1024                                                 struct work_struct *work)
1025{
1026        struct worker *worker;
1027
1028        hash_for_each_possible(pool->busy_hash, worker, hentry,
1029                               (unsigned long)work)
1030                if (worker->current_work == work &&
1031                    worker->current_func == work->func)
1032                        return worker;
1033
1034        return NULL;
1035}
1036
1037/**
1038 * move_linked_works - move linked works to a list
1039 * @work: start of series of works to be scheduled
1040 * @head: target list to append @work to
1041 * @nextp: out parameter for nested worklist walking
1042 *
1043 * Schedule linked works starting from @work to @head.  Work series to
1044 * be scheduled starts at @work and includes any consecutive work with
1045 * WORK_STRUCT_LINKED set in its predecessor.
1046 *
1047 * If @nextp is not NULL, it's updated to point to the next work of
1048 * the last scheduled work.  This allows move_linked_works() to be
1049 * nested inside outer list_for_each_entry_safe().
1050 *
1051 * CONTEXT:
1052 * spin_lock_irq(pool->lock).
1053 */
1054static void move_linked_works(struct work_struct *work, struct list_head *head,
1055                              struct work_struct **nextp)
1056{
1057        struct work_struct *n;
1058
1059        /*
1060         * Linked worklist will always end before the end of the list,
1061         * use NULL for list head.
1062         */
1063        list_for_each_entry_safe_from(work, n, NULL, entry) {
1064                list_move_tail(&work->entry, head);
1065                if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1066                        break;
1067        }
1068
1069        /*
1070         * If we're already inside safe list traversal and have moved
1071         * multiple works to the scheduled queue, the next position
1072         * needs to be updated.
1073         */
1074        if (nextp)
1075                *nextp = n;
1076}
1077
1078/**
1079 * get_pwq - get an extra reference on the specified pool_workqueue
1080 * @pwq: pool_workqueue to get
1081 *
1082 * Obtain an extra reference on @pwq.  The caller should guarantee that
1083 * @pwq has positive refcnt and be holding the matching pool->lock.
1084 */
1085static void get_pwq(struct pool_workqueue *pwq)
1086{
1087        lockdep_assert_held(&pwq->pool->lock);
1088        WARN_ON_ONCE(pwq->refcnt <= 0);
1089        pwq->refcnt++;
1090}
1091
1092/**
1093 * put_pwq - put a pool_workqueue reference
1094 * @pwq: pool_workqueue to put
1095 *
1096 * Drop a reference of @pwq.  If its refcnt reaches zero, schedule its
1097 * destruction.  The caller should be holding the matching pool->lock.
1098 */
1099static void put_pwq(struct pool_workqueue *pwq)
1100{
1101        lockdep_assert_held(&pwq->pool->lock);
1102        if (likely(--pwq->refcnt))
1103                return;
1104        if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND)))
1105                return;
1106        /*
1107         * @pwq can't be released under pool->lock, bounce to
1108         * pwq_unbound_release_workfn().  This never recurses on the same
1109         * pool->lock as this path is taken only for unbound workqueues and
1110         * the release work item is scheduled on a per-cpu workqueue.  To
1111         * avoid lockdep warning, unbound pool->locks are given lockdep
1112         * subclass of 1 in get_unbound_pool().
1113         */
1114        schedule_work(&pwq->unbound_release_work);
1115}
1116
1117/**
1118 * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
1119 * @pwq: pool_workqueue to put (can be %NULL)
1120 *
1121 * put_pwq() with locking.  This function also allows %NULL @pwq.
1122 */
1123static void put_pwq_unlocked(struct pool_workqueue *pwq)
1124{
1125        if (pwq) {
1126                /*
1127                 * As both pwqs and pools are sched-RCU protected, the
1128                 * following lock operations are safe.
1129                 */
1130                spin_lock_irq(&pwq->pool->lock);
1131                put_pwq(pwq);
1132                spin_unlock_irq(&pwq->pool->lock);
1133        }
1134}
1135
1136static void pwq_activate_delayed_work(struct work_struct *work)
1137{
1138        struct pool_workqueue *pwq = get_work_pwq(work);
1139
1140        trace_workqueue_activate_work(work);
1141        if (list_empty(&pwq->pool->worklist))
1142                pwq->pool->watchdog_ts = jiffies;
1143        move_linked_works(work, &pwq->pool->worklist, NULL);
1144        __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
1145        pwq->nr_active++;
1146}
1147
1148static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
1149{
1150        struct work_struct *work = list_first_entry(&pwq->delayed_works,
1151                                                    struct work_struct, entry);
1152
1153        pwq_activate_delayed_work(work);
1154}
1155
1156/**
1157 * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
1158 * @pwq: pwq of interest
1159 * @color: color of work which left the queue
1160 *
1161 * A work either has completed or is removed from pending queue,
1162 * decrement nr_in_flight of its pwq and handle workqueue flushing.
1163 *
1164 * CONTEXT:
1165 * spin_lock_irq(pool->lock).
1166 */
1167static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
1168{
1169        /* uncolored work items don't participate in flushing or nr_active */
1170        if (color == WORK_NO_COLOR)
1171                goto out_put;
1172
1173        pwq->nr_in_flight[color]--;
1174
1175        pwq->nr_active--;
1176        if (!list_empty(&pwq->delayed_works)) {
1177                /* one down, submit a delayed one */
1178                if (pwq->nr_active < pwq->max_active)
1179                        pwq_activate_first_delayed(pwq);
1180        }
1181
1182        /* is flush in progress and are we at the flushing tip? */
1183        if (likely(pwq->flush_color != color))
1184                goto out_put;
1185
1186        /* are there still in-flight works? */
1187        if (pwq->nr_in_flight[color])
1188                goto out_put;
1189
1190        /* this pwq is done, clear flush_color */
1191        pwq->flush_color = -1;
1192
1193        /*
1194         * If this was the last pwq, wake up the first flusher.  It
1195         * will handle the rest.
1196         */
1197        if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
1198                complete(&pwq->wq->first_flusher->done);
1199out_put:
1200        put_pwq(pwq);
1201}
1202
1203/**
1204 * try_to_grab_pending - steal work item from worklist and disable irq
1205 * @work: work item to steal
1206 * @is_dwork: @work is a delayed_work
1207 * @flags: place to store irq state
1208 *
1209 * Try to grab PENDING bit of @work.  This function can handle @work in any
1210 * stable state - idle, on timer or on worklist.
1211 *
1212 * Return:
1213 *  1           if @work was pending and we successfully stole PENDING
1214 *  0           if @work was idle and we claimed PENDING
1215 *  -EAGAIN     if PENDING couldn't be grabbed at the moment, safe to busy-retry
1216 *  -ENOENT     if someone else is canceling @work, this state may persist
1217 *              for arbitrarily long
1218 *
1219 * Note:
1220 * On >= 0 return, the caller owns @work's PENDING bit.  To avoid getting
1221 * interrupted while holding PENDING and @work off queue, irq must be
1222 * disabled on entry.  This, combined with delayed_work->timer being
1223 * irqsafe, ensures that we return -EAGAIN for finite short period of time.
1224 *
1225 * On successful return, >= 0, irq is disabled and the caller is
1226 * responsible for releasing it using local_irq_restore(*@flags).
1227 *
1228 * This function is safe to call from any context including IRQ handler.
1229 */
1230static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
1231                               unsigned long *flags)
1232{
1233        struct worker_pool *pool;
1234        struct pool_workqueue *pwq;
1235
1236        local_irq_save(*flags);
1237
1238        /* try to steal the timer if it exists */
1239        if (is_dwork) {
1240                struct delayed_work *dwork = to_delayed_work(work);
1241
1242                /*
1243                 * dwork->timer is irqsafe.  If del_timer() fails, it's
1244                 * guaranteed that the timer is not queued anywhere and not
1245                 * running on the local CPU.
1246                 */
1247                if (likely(del_timer(&dwork->timer)))
1248                        return 1;
1249        }
1250
1251        /* try to claim PENDING the normal way */
1252        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
1253                return 0;
1254
1255        /*
1256         * The queueing is in progress, or it is already queued. Try to
1257         * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
1258         */
1259        pool = get_work_pool(work);
1260        if (!pool)
1261                goto fail;
1262
1263        spin_lock(&pool->lock);
1264        /*
1265         * work->data is guaranteed to point to pwq only while the work
1266         * item is queued on pwq->wq, and both updating work->data to point
1267         * to pwq on queueing and to pool on dequeueing are done under
1268         * pwq->pool->lock.  This in turn guarantees that, if work->data
1269         * points to pwq which is associated with a locked pool, the work
1270         * item is currently queued on that pool.
1271         */
1272        pwq = get_work_pwq(work);
1273        if (pwq && pwq->pool == pool) {
1274                debug_work_deactivate(work);
1275
1276                /*
1277                 * A delayed work item cannot be grabbed directly because
1278                 * it might have linked NO_COLOR work items which, if left
1279                 * on the delayed_list, will confuse pwq->nr_active
1280                 * management later on and cause stall.  Make sure the work
1281                 * item is activated before grabbing.
1282                 */
1283                if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
1284                        pwq_activate_delayed_work(work);
1285
1286                list_del_init(&work->entry);
1287                pwq_dec_nr_in_flight(pwq, get_work_color(work));
1288
1289                /* work->data points to pwq iff queued, point to pool */
1290                set_work_pool_and_keep_pending(work, pool->id);
1291
1292                spin_unlock(&pool->lock);
1293                return 1;
1294        }
1295        spin_unlock(&pool->lock);
1296fail:
1297        local_irq_restore(*flags);
1298        if (work_is_canceling(work))
1299                return -ENOENT;
1300        cpu_relax();
1301        return -EAGAIN;
1302}
1303
1304/**
1305 * insert_work - insert a work into a pool
1306 * @pwq: pwq @work belongs to
1307 * @work: work to insert
1308 * @head: insertion point
1309 * @extra_flags: extra WORK_STRUCT_* flags to set
1310 *
1311 * Insert @work which belongs to @pwq after @head.  @extra_flags is or'd to
1312 * work_struct flags.
1313 *
1314 * CONTEXT:
1315 * spin_lock_irq(pool->lock).
1316 */
1317static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
1318                        struct list_head *head, unsigned int extra_flags)
1319{
1320        struct worker_pool *pool = pwq->pool;
1321
1322        /* we own @work, set data and link */
1323        set_work_pwq(work, pwq, extra_flags);
1324        list_add_tail(&work->entry, head);
1325        get_pwq(pwq);
1326
1327        /*
1328         * Ensure either wq_worker_sleeping() sees the above
1329         * list_add_tail() or we see zero nr_running to avoid workers lying
1330         * around lazily while there are works to be processed.
1331         */
1332        smp_mb();
1333
1334        if (__need_more_worker(pool))
1335                wake_up_worker(pool);
1336}
1337
1338/*
1339 * Test whether @work is being queued from another work executing on the
1340 * same workqueue.
1341 */
1342static bool is_chained_work(struct workqueue_struct *wq)
1343{
1344        struct worker *worker;
1345
1346        worker = current_wq_worker();
1347        /*
1348         * Return %true iff I'm a worker execuing a work item on @wq.  If
1349         * I'm @worker, it's safe to dereference it without locking.
1350         */
1351        return worker && worker->current_pwq->wq == wq;
1352}
1353
1354/*
1355 * When queueing an unbound work item to a wq, prefer local CPU if allowed
1356 * by wq_unbound_cpumask.  Otherwise, round robin among the allowed ones to
1357 * avoid perturbing sensitive tasks.
1358 */
1359static int wq_select_unbound_cpu(int cpu)
1360{
1361        static bool printed_dbg_warning;
1362        int new_cpu;
1363
1364        if (likely(!wq_debug_force_rr_cpu)) {
1365                if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
1366                        return cpu;
1367        } else if (!printed_dbg_warning) {
1368                pr_warn("workqueue: round-robin CPU selection forced, expect performance impact\n");
1369                printed_dbg_warning = true;
1370        }
1371
1372        if (cpumask_empty(wq_unbound_cpumask))
1373                return cpu;
1374
1375        new_cpu = __this_cpu_read(wq_rr_cpu_last);
1376        new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
1377        if (unlikely(new_cpu >= nr_cpu_ids)) {
1378                new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
1379                if (unlikely(new_cpu >= nr_cpu_ids))
1380                        return cpu;
1381        }
1382        __this_cpu_write(wq_rr_cpu_last, new_cpu);
1383
1384        return new_cpu;
1385}
1386
1387static void __queue_work(int cpu, struct workqueue_struct *wq,
1388                         struct work_struct *work)
1389{
1390        struct pool_workqueue *pwq;
1391        struct worker_pool *last_pool;
1392        struct list_head *worklist;
1393        unsigned int work_flags;
1394        unsigned int req_cpu = cpu;
1395
1396        /*
1397         * While a work item is PENDING && off queue, a task trying to
1398         * steal the PENDING will busy-loop waiting for it to either get
1399         * queued or lose PENDING.  Grabbing PENDING and queueing should
1400         * happen with IRQ disabled.
1401         */
1402        WARN_ON_ONCE(!irqs_disabled());
1403
1404        debug_work_activate(work);
1405
1406        /* if draining, only works from the same workqueue are allowed */
1407        if (unlikely(wq->flags & __WQ_DRAINING) &&
1408            WARN_ON_ONCE(!is_chained_work(wq)))
1409                return;
1410retry:
1411        if (req_cpu == WORK_CPU_UNBOUND)
1412                cpu = wq_select_unbound_cpu(raw_smp_processor_id());
1413
1414        /* pwq which will be used unless @work is executing elsewhere */
1415        if (!(wq->flags & WQ_UNBOUND))
1416                pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
1417        else
1418                pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
1419
1420        /*
1421         * If @work was previously on a different pool, it might still be
1422         * running there, in which case the work needs to be queued on that
1423         * pool to guarantee non-reentrancy.
1424         */
1425        last_pool = get_work_pool(work);
1426        if (last_pool && last_pool != pwq->pool) {
1427                struct worker *worker;
1428
1429                spin_lock(&last_pool->lock);
1430
1431                worker = find_worker_executing_work(last_pool, work);
1432
1433                if (worker && worker->current_pwq->wq == wq) {
1434                        pwq = worker->current_pwq;
1435                } else {
1436                        /* meh... not running there, queue here */
1437                        spin_unlock(&last_pool->lock);
1438                        spin_lock(&pwq->pool->lock);
1439                }
1440        } else {
1441                spin_lock(&pwq->pool->lock);
1442        }
1443
1444        /*
1445         * pwq is determined and locked.  For unbound pools, we could have
1446         * raced with pwq release and it could already be dead.  If its
1447         * refcnt is zero, repeat pwq selection.  Note that pwqs never die
1448         * without another pwq replacing it in the numa_pwq_tbl or while
1449         * work items are executing on it, so the retrying is guaranteed to
1450         * make forward-progress.
1451         */
1452        if (unlikely(!pwq->refcnt)) {
1453                if (wq->flags & WQ_UNBOUND) {
1454                        spin_unlock(&pwq->pool->lock);
1455                        cpu_relax();
1456                        goto retry;
1457                }
1458                /* oops */
1459                WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
1460                          wq->name, cpu);
1461        }
1462
1463        /* pwq determined, queue */
1464        trace_workqueue_queue_work(req_cpu, pwq, work);
1465
1466        if (WARN_ON(!list_empty(&work->entry))) {
1467                spin_unlock(&pwq->pool->lock);
1468                return;
1469        }
1470
1471        pwq->nr_in_flight[pwq->work_color]++;
1472        work_flags = work_color_to_flags(pwq->work_color);
1473
1474        if (likely(pwq->nr_active < pwq->max_active)) {
1475                trace_workqueue_activate_work(work);
1476                pwq->nr_active++;
1477                worklist = &pwq->pool->worklist;
1478                if (list_empty(worklist))
1479                        pwq->pool->watchdog_ts = jiffies;
1480        } else {
1481                work_flags |= WORK_STRUCT_DELAYED;
1482                worklist = &pwq->delayed_works;
1483        }
1484
1485        insert_work(pwq, work, worklist, work_flags);
1486
1487        spin_unlock(&pwq->pool->lock);
1488}
1489
1490/**
1491 * queue_work_on - queue work on specific cpu
1492 * @cpu: CPU number to execute work on
1493 * @wq: workqueue to use
1494 * @work: work to queue
1495 *
1496 * We queue the work to a specific CPU, the caller must ensure it
1497 * can't go away.
1498 *
1499 * Return: %false if @work was already on a queue, %true otherwise.
1500 */
1501bool queue_work_on(int cpu, struct workqueue_struct *wq,
1502                   struct work_struct *work)
1503{
1504        bool ret = false;
1505        unsigned long flags;
1506
1507        local_irq_save(flags);
1508
1509        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1510                __queue_work(cpu, wq, work);
1511                ret = true;
1512        }
1513
1514        local_irq_restore(flags);
1515        return ret;
1516}
1517EXPORT_SYMBOL(queue_work_on);
1518
1519void delayed_work_timer_fn(unsigned long __data)
1520{
1521        struct delayed_work *dwork = (struct delayed_work *)__data;
1522
1523        /* should have been called from irqsafe timer with irq already off */
1524        __queue_work(dwork->cpu, dwork->wq, &dwork->work);
1525}
1526EXPORT_SYMBOL(delayed_work_timer_fn);
1527
1528static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
1529                                struct delayed_work *dwork, unsigned long delay)
1530{
1531        struct timer_list *timer = &dwork->timer;
1532        struct work_struct *work = &dwork->work;
1533
1534        WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
1535                     timer->data != (unsigned long)dwork);
1536        WARN_ON_ONCE(timer_pending(timer));
1537        WARN_ON_ONCE(!list_empty(&work->entry));
1538
1539        /*
1540         * If @delay is 0, queue @dwork->work immediately.  This is for
1541         * both optimization and correctness.  The earliest @timer can
1542         * expire is on the closest next tick and delayed_work users depend
1543         * on that there's no such delay when @delay is 0.
1544         */
1545        if (!delay) {
1546                __queue_work(cpu, wq, &dwork->work);
1547                return;
1548        }
1549
1550        timer_stats_timer_set_start_info(&dwork->timer);
1551
1552        dwork->wq = wq;
1553        dwork->cpu = cpu;
1554        timer->expires = jiffies + delay;
1555
1556        if (unlikely(cpu != WORK_CPU_UNBOUND))
1557                add_timer_on(timer, cpu);
1558        else
1559                add_timer(timer);
1560}
1561
1562/**
1563 * queue_delayed_work_on - queue work on specific CPU after delay
1564 * @cpu: CPU number to execute work on
1565 * @wq: workqueue to use
1566 * @dwork: work to queue
1567 * @delay: number of jiffies to wait before queueing
1568 *
1569 * Return: %false if @work was already on a queue, %true otherwise.  If
1570 * @delay is zero and @dwork is idle, it will be scheduled for immediate
1571 * execution.
1572 */
1573bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
1574                           struct delayed_work *dwork, unsigned long delay)
1575{
1576        struct work_struct *work = &dwork->work;
1577        bool ret = false;
1578        unsigned long flags;
1579
1580        /* read the comment in __queue_work() */
1581        local_irq_save(flags);
1582
1583        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1584                __queue_delayed_work(cpu, wq, dwork, delay);
1585                ret = true;
1586        }
1587
1588        local_irq_restore(flags);
1589        return ret;
1590}
1591EXPORT_SYMBOL(queue_delayed_work_on);
1592
1593/**
1594 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
1595 * @cpu: CPU number to execute work on
1596 * @wq: workqueue to use
1597 * @dwork: work to queue
1598 * @delay: number of jiffies to wait before queueing
1599 *
1600 * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
1601 * modify @dwork's timer so that it expires after @delay.  If @delay is
1602 * zero, @work is guaranteed to be scheduled immediately regardless of its
1603 * current state.
1604 *
1605 * Return: %false if @dwork was idle and queued, %true if @dwork was
1606 * pending and its timer was modified.
1607 *
1608 * This function is safe to call from any context including IRQ handler.
1609 * See try_to_grab_pending() for details.
1610 */
1611bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
1612                         struct delayed_work *dwork, unsigned long delay)
1613{
1614        unsigned long flags;
1615        int ret;
1616
1617        do {
1618                ret = try_to_grab_pending(&dwork->work, true, &flags);
1619        } while (unlikely(ret == -EAGAIN));
1620
1621        if (likely(ret >= 0)) {
1622                __queue_delayed_work(cpu, wq, dwork, delay);
1623                local_irq_restore(flags);
1624        }
1625
1626        /* -ENOENT from try_to_grab_pending() becomes %true */
1627        return ret;
1628}
1629EXPORT_SYMBOL_GPL(mod_delayed_work_on);
1630
1631/**
1632 * worker_enter_idle - enter idle state
1633 * @worker: worker which is entering idle state
1634 *
1635 * @worker is entering idle state.  Update stats and idle timer if
1636 * necessary.
1637 *
1638 * LOCKING:
1639 * spin_lock_irq(pool->lock).
1640 */
1641static void worker_enter_idle(struct worker *worker)
1642{
1643        struct worker_pool *pool = worker->pool;
1644
1645        if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
1646            WARN_ON_ONCE(!list_empty(&worker->entry) &&
1647                         (worker->hentry.next || worker->hentry.pprev)))
1648                return;
1649
1650        /* can't use worker_set_flags(), also called from create_worker() */
1651        worker->flags |= WORKER_IDLE;
1652        pool->nr_idle++;
1653        worker->last_active = jiffies;
1654
1655        /* idle_list is LIFO */
1656        list_add(&worker->entry, &pool->idle_list);
1657
1658        if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
1659                mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
1660
1661        /*
1662         * Sanity check nr_running.  Because wq_unbind_fn() releases
1663         * pool->lock between setting %WORKER_UNBOUND and zapping
1664         * nr_running, the warning may trigger spuriously.  Check iff
1665         * unbind is not in progress.
1666         */
1667        WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
1668                     pool->nr_workers == pool->nr_idle &&
1669                     atomic_read(&pool->nr_running));
1670}
1671
1672/**
1673 * worker_leave_idle - leave idle state
1674 * @worker: worker which is leaving idle state
1675 *
1676 * @worker is leaving idle state.  Update stats.
1677 *
1678 * LOCKING:
1679 * spin_lock_irq(pool->lock).
1680 */
1681static void worker_leave_idle(struct worker *worker)
1682{
1683        struct worker_pool *pool = worker->pool;
1684
1685        if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
1686                return;
1687        worker_clr_flags(worker, WORKER_IDLE);
1688        pool->nr_idle--;
1689        list_del_init(&worker->entry);
1690}
1691
1692static struct worker *alloc_worker(int node)
1693{
1694        struct worker *worker;
1695
1696        worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
1697        if (worker) {
1698                INIT_LIST_HEAD(&worker->entry);
1699                INIT_LIST_HEAD(&worker->scheduled);
1700                INIT_LIST_HEAD(&worker->node);
1701                /* on creation a worker is in !idle && prep state */
1702                worker->flags = WORKER_PREP;
1703        }
1704        return worker;
1705}
1706
1707/**
1708 * worker_attach_to_pool() - attach a worker to a pool
1709 * @worker: worker to be attached
1710 * @pool: the target pool
1711 *
1712 * Attach @worker to @pool.  Once attached, the %WORKER_UNBOUND flag and
1713 * cpu-binding of @worker are kept coordinated with the pool across
1714 * cpu-[un]hotplugs.
1715 */
1716static void worker_attach_to_pool(struct worker *worker,
1717                                   struct worker_pool *pool)
1718{
1719        mutex_lock(&pool->attach_mutex);
1720
1721        /*
1722         * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
1723         * online CPUs.  It'll be re-applied when any of the CPUs come up.
1724         */
1725        set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
1726
1727        /*
1728         * The pool->attach_mutex ensures %POOL_DISASSOCIATED remains
1729         * stable across this function.  See the comments above the
1730         * flag definition for details.
1731         */
1732        if (pool->flags & POOL_DISASSOCIATED)
1733                worker->flags |= WORKER_UNBOUND;
1734
1735        list_add_tail(&worker->node, &pool->workers);
1736
1737        mutex_unlock(&pool->attach_mutex);
1738}
1739
1740/**
1741 * worker_detach_from_pool() - detach a worker from its pool
1742 * @worker: worker which is attached to its pool
1743 * @pool: the pool @worker is attached to
1744 *
1745 * Undo the attaching which had been done in worker_attach_to_pool().  The
1746 * caller worker shouldn't access to the pool after detached except it has
1747 * other reference to the pool.
1748 */
1749static void worker_detach_from_pool(struct worker *worker,
1750                                    struct worker_pool *pool)
1751{
1752        struct completion *detach_completion = NULL;
1753
1754        mutex_lock(&pool->attach_mutex);
1755        list_del(&worker->node);
1756        if (list_empty(&pool->workers))
1757                detach_completion = pool->detach_completion;
1758        mutex_unlock(&pool->attach_mutex);
1759
1760        /* clear leftover flags without pool->lock after it is detached */
1761        worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
1762
1763        if (detach_completion)
1764                complete(detach_completion);
1765}
1766
1767/**
1768 * create_worker - create a new workqueue worker
1769 * @pool: pool the new worker will belong to
1770 *
1771 * Create and start a new worker which is attached to @pool.
1772 *
1773 * CONTEXT:
1774 * Might sleep.  Does GFP_KERNEL allocations.
1775 *
1776 * Return:
1777 * Pointer to the newly created worker.
1778 */
1779static struct worker *create_worker(struct worker_pool *pool)
1780{
1781        struct worker *worker = NULL;
1782        int id = -1;
1783        char id_buf[16];
1784
1785        /* ID is needed to determine kthread name */
1786        id = ida_simple_get(&pool->worker_ida, 0, 0, GFP_KERNEL);
1787        if (id < 0)
1788                goto fail;
1789
1790        worker = alloc_worker(pool->node);
1791        if (!worker)
1792                goto fail;
1793
1794        worker->pool = pool;
1795        worker->id = id;
1796
1797        if (pool->cpu >= 0)
1798                snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
1799                         pool->attrs->nice < 0  ? "H" : "");
1800        else
1801                snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
1802
1803        worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
1804                                              "kworker/%s", id_buf);
1805        if (IS_ERR(worker->task))
1806                goto fail;
1807
1808        set_user_nice(worker->task, pool->attrs->nice);
1809        kthread_bind_mask(worker->task, pool->attrs->cpumask);
1810
1811        /* successful, attach the worker to the pool */
1812        worker_attach_to_pool(worker, pool);
1813
1814        /* start the newly created worker */
1815        spin_lock_irq(&pool->lock);
1816        worker->pool->nr_workers++;
1817        worker_enter_idle(worker);
1818        wake_up_process(worker->task);
1819        spin_unlock_irq(&pool->lock);
1820
1821        return worker;
1822
1823fail:
1824        if (id >= 0)
1825                ida_simple_remove(&pool->worker_ida, id);
1826        kfree(worker);
1827        return NULL;
1828}
1829
1830/**
1831 * destroy_worker - destroy a workqueue worker
1832 * @worker: worker to be destroyed
1833 *
1834 * Destroy @worker and adjust @pool stats accordingly.  The worker should
1835 * be idle.
1836 *
1837 * CONTEXT:
1838 * spin_lock_irq(pool->lock).
1839 */
1840static void destroy_worker(struct worker *worker)
1841{
1842        struct worker_pool *pool = worker->pool;
1843
1844        lockdep_assert_held(&pool->lock);
1845
1846        /* sanity check frenzy */
1847        if (WARN_ON(worker->current_work) ||
1848            WARN_ON(!list_empty(&worker->scheduled)) ||
1849            WARN_ON(!(worker->flags & WORKER_IDLE)))
1850                return;
1851
1852        pool->nr_workers--;
1853        pool->nr_idle--;
1854
1855        list_del_init(&worker->entry);
1856        worker->flags |= WORKER_DIE;
1857        wake_up_process(worker->task);
1858}
1859
1860static void idle_worker_timeout(unsigned long __pool)
1861{
1862        struct worker_pool *pool = (void *)__pool;
1863
1864        spin_lock_irq(&pool->lock);
1865
1866        while (too_many_workers(pool)) {
1867                struct worker *worker;
1868                unsigned long expires;
1869
1870                /* idle_list is kept in LIFO order, check the last one */
1871                worker = list_entry(pool->idle_list.prev, struct worker, entry);
1872                expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1873
1874                if (time_before(jiffies, expires)) {
1875                        mod_timer(&pool->idle_timer, expires);
1876                        break;
1877                }
1878
1879                destroy_worker(worker);
1880        }
1881
1882        spin_unlock_irq(&pool->lock);
1883}
1884
1885static void send_mayday(struct work_struct *work)
1886{
1887        struct pool_workqueue *pwq = get_work_pwq(work);
1888        struct workqueue_struct *wq = pwq->wq;
1889
1890        lockdep_assert_held(&wq_mayday_lock);
1891
1892        if (!wq->rescuer)
1893                return;
1894
1895        /* mayday mayday mayday */
1896        if (list_empty(&pwq->mayday_node)) {
1897                /*
1898                 * If @pwq is for an unbound wq, its base ref may be put at
1899                 * any time due to an attribute change.  Pin @pwq until the
1900                 * rescuer is done with it.
1901                 */
1902                get_pwq(pwq);
1903                list_add_tail(&pwq->mayday_node, &wq->maydays);
1904                wake_up_process(wq->rescuer->task);
1905        }
1906}
1907
1908static void pool_mayday_timeout(unsigned long __pool)
1909{
1910        struct worker_pool *pool = (void *)__pool;
1911        struct work_struct *work;
1912
1913        spin_lock_irq(&pool->lock);
1914        spin_lock(&wq_mayday_lock);             /* for wq->maydays */
1915
1916        if (need_to_create_worker(pool)) {
1917                /*
1918                 * We've been trying to create a new worker but
1919                 * haven't been successful.  We might be hitting an
1920                 * allocation deadlock.  Send distress signals to
1921                 * rescuers.
1922                 */
1923                list_for_each_entry(work, &pool->worklist, entry)
1924                        send_mayday(work);
1925        }
1926
1927        spin_unlock(&wq_mayday_lock);
1928        spin_unlock_irq(&pool->lock);
1929
1930        mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
1931}
1932
1933/**
1934 * maybe_create_worker - create a new worker if necessary
1935 * @pool: pool to create a new worker for
1936 *
1937 * Create a new worker for @pool if necessary.  @pool is guaranteed to
1938 * have at least one idle worker on return from this function.  If
1939 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
1940 * sent to all rescuers with works scheduled on @pool to resolve
1941 * possible allocation deadlock.
1942 *
1943 * On return, need_to_create_worker() is guaranteed to be %false and
1944 * may_start_working() %true.
1945 *
1946 * LOCKING:
1947 * spin_lock_irq(pool->lock) which may be released and regrabbed
1948 * multiple times.  Does GFP_KERNEL allocations.  Called only from
1949 * manager.
1950 */
1951static void maybe_create_worker(struct worker_pool *pool)
1952__releases(&pool->lock)
1953__acquires(&pool->lock)
1954{
1955restart:
1956        spin_unlock_irq(&pool->lock);
1957
1958        /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
1959        mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
1960
1961        while (true) {
1962                if (create_worker(pool) || !need_to_create_worker(pool))
1963                        break;
1964
1965                schedule_timeout_interruptible(CREATE_COOLDOWN);
1966
1967                if (!need_to_create_worker(pool))
1968                        break;
1969        }
1970
1971        del_timer_sync(&pool->mayday_timer);
1972        spin_lock_irq(&pool->lock);
1973        /*
1974         * This is necessary even after a new worker was just successfully
1975         * created as @pool->lock was dropped and the new worker might have
1976         * already become busy.
1977         */
1978        if (need_to_create_worker(pool))
1979                goto restart;
1980}
1981
1982/**
1983 * manage_workers - manage worker pool
1984 * @worker: self
1985 *
1986 * Assume the manager role and manage the worker pool @worker belongs
1987 * to.  At any given time, there can be only zero or one manager per
1988 * pool.  The exclusion is handled automatically by this function.
1989 *
1990 * The caller can safely start processing works on false return.  On
1991 * true return, it's guaranteed that need_to_create_worker() is false
1992 * and may_start_working() is true.
1993 *
1994 * CONTEXT:
1995 * spin_lock_irq(pool->lock) which may be released and regrabbed
1996 * multiple times.  Does GFP_KERNEL allocations.
1997 *
1998 * Return:
1999 * %false if the pool doesn't need management and the caller can safely
2000 * start processing works, %true if management function was performed and
2001 * the conditions that the caller verified before calling the function may
2002 * no longer be true.
2003 */
2004static bool manage_workers(struct worker *worker)
2005{
2006        struct worker_pool *pool = worker->pool;
2007
2008        /*
2009         * Anyone who successfully grabs manager_arb wins the arbitration
2010         * and becomes the manager.  mutex_trylock() on pool->manager_arb
2011         * failure while holding pool->lock reliably indicates that someone
2012         * else is managing the pool and the worker which failed trylock
2013         * can proceed to executing work items.  This means that anyone
2014         * grabbing manager_arb is responsible for actually performing
2015         * manager duties.  If manager_arb is grabbed and released without
2016         * actual management, the pool may stall indefinitely.
2017         */
2018        if (!mutex_trylock(&pool->manager_arb))
2019                return false;
2020        pool->manager = worker;
2021
2022        maybe_create_worker(pool);
2023
2024        pool->manager = NULL;
2025        mutex_unlock(&pool->manager_arb);
2026        return true;
2027}
2028
2029/**
2030 * process_one_work - process single work
2031 * @worker: self
2032 * @work: work to process
2033 *
2034 * Process @work.  This function contains all the logics necessary to
2035 * process a single work including synchronization against and
2036 * interaction with other workers on the same cpu, queueing and
2037 * flushing.  As long as context requirement is met, any worker can
2038 * call this function to process a work.
2039 *
2040 * CONTEXT:
2041 * spin_lock_irq(pool->lock) which is released and regrabbed.
2042 */
2043static void process_one_work(struct worker *worker, struct work_struct *work)
2044__releases(&pool->lock)
2045__acquires(&pool->lock)
2046{
2047        struct pool_workqueue *pwq = get_work_pwq(work);
2048        struct worker_pool *pool = worker->pool;
2049        bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
2050        int work_color;
2051        struct worker *collision;
2052#ifdef CONFIG_LOCKDEP
2053        /*
2054         * It is permissible to free the struct work_struct from
2055         * inside the function that is called from it, this we need to
2056         * take into account for lockdep too.  To avoid bogus "held
2057         * lock freed" warnings as well as problems when looking into
2058         * work->lockdep_map, make a copy and use that here.
2059         */
2060        struct lockdep_map lockdep_map;
2061
2062        lockdep_copy_map(&lockdep_map, &work->lockdep_map);
2063#endif
2064        /* ensure we're on the correct CPU */
2065        WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
2066                     raw_smp_processor_id() != pool->cpu);
2067
2068        /*
2069         * A single work shouldn't be executed concurrently by
2070         * multiple workers on a single cpu.  Check whether anyone is
2071         * already processing the work.  If so, defer the work to the
2072         * currently executing one.
2073         */
2074        collision = find_worker_executing_work(pool, work);
2075        if (unlikely(collision)) {
2076                move_linked_works(work, &collision->scheduled, NULL);
2077                return;
2078        }
2079
2080        /* claim and dequeue */
2081        debug_work_deactivate(work);
2082        hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
2083        worker->current_work = work;
2084        worker->current_func = work->func;
2085        worker->current_pwq = pwq;
2086        work_color = get_work_color(work);
2087
2088        list_del_init(&work->entry);
2089
2090        /*
2091         * CPU intensive works don't participate in concurrency management.
2092         * They're the scheduler's responsibility.  This takes @worker out
2093         * of concurrency management and the next code block will chain
2094         * execution of the pending work items.
2095         */
2096        if (unlikely(cpu_intensive))
2097                worker_set_flags(worker, WORKER_CPU_INTENSIVE);
2098
2099        /*
2100         * Wake up another worker if necessary.  The condition is always
2101         * false for normal per-cpu workers since nr_running would always
2102         * be >= 1 at this point.  This is used to chain execution of the
2103         * pending work items for WORKER_NOT_RUNNING workers such as the
2104         * UNBOUND and CPU_INTENSIVE ones.
2105         */
2106        if (need_more_worker(pool))
2107                wake_up_worker(pool);
2108
2109        /*
2110         * Record the last pool and clear PENDING which should be the last
2111         * update to @work.  Also, do this inside @pool->lock so that
2112         * PENDING and queued state changes happen together while IRQ is
2113         * disabled.
2114         */
2115        set_work_pool_and_clear_pending(work, pool->id);
2116
2117        spin_unlock_irq(&pool->lock);
2118
2119        lock_map_acquire_read(&pwq->wq->lockdep_map);
2120        lock_map_acquire(&lockdep_map);
2121        trace_workqueue_execute_start(work);
2122        worker->current_func(work);
2123        /*
2124         * While we must be careful to not use "work" after this, the trace
2125         * point will only record its address.
2126         */
2127        trace_workqueue_execute_end(work);
2128        lock_map_release(&lockdep_map);
2129        lock_map_release(&pwq->wq->lockdep_map);
2130
2131        if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
2132                pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
2133                       "     last function: %pf\n",
2134                       current->comm, preempt_count(), task_pid_nr(current),
2135                       worker->current_func);
2136                debug_show_held_locks(current);
2137                dump_stack();
2138        }
2139
2140        /*
2141         * The following prevents a kworker from hogging CPU on !PREEMPT
2142         * kernels, where a requeueing work item waiting for something to
2143         * happen could deadlock with stop_machine as such work item could
2144         * indefinitely requeue itself while all other CPUs are trapped in
2145         * stop_machine. At the same time, report a quiescent RCU state so
2146         * the same condition doesn't freeze RCU.
2147         */
2148        cond_resched_rcu_qs();
2149
2150        spin_lock_irq(&pool->lock);
2151
2152        /* clear cpu intensive status */
2153        if (unlikely(cpu_intensive))
2154                worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
2155
2156        /* we're done with it, release */
2157        hash_del(&worker->hentry);
2158        worker->current_work = NULL;
2159        worker->current_func = NULL;
2160        worker->current_pwq = NULL;
2161        worker->desc_valid = false;
2162        pwq_dec_nr_in_flight(pwq, work_color);
2163}
2164
2165/**
2166 * process_scheduled_works - process scheduled works
2167 * @worker: self
2168 *
2169 * Process all scheduled works.  Please note that the scheduled list
2170 * may change while processing a work, so this function repeatedly
2171 * fetches a work from the top and executes it.
2172 *
2173 * CONTEXT:
2174 * spin_lock_irq(pool->lock) which may be released and regrabbed
2175 * multiple times.
2176 */
2177static void process_scheduled_works(struct worker *worker)
2178{
2179        while (!list_empty(&worker->scheduled)) {
2180                struct work_struct *work = list_first_entry(&worker->scheduled,
2181                                                struct work_struct, entry);
2182                process_one_work(worker, work);
2183        }
2184}
2185
2186/**
2187 * worker_thread - the worker thread function
2188 * @__worker: self
2189 *
2190 * The worker thread function.  All workers belong to a worker_pool -
2191 * either a per-cpu one or dynamic unbound one.  These workers process all
2192 * work items regardless of their specific target workqueue.  The only
2193 * exception is work items which belong to workqueues with a rescuer which
2194 * will be explained in rescuer_thread().
2195 *
2196 * Return: 0
2197 */
2198static int worker_thread(void *__worker)
2199{
2200        struct worker *worker = __worker;
2201        struct worker_pool *pool = worker->pool;
2202
2203        /* tell the scheduler that this is a workqueue worker */
2204        worker->task->flags |= PF_WQ_WORKER;
2205woke_up:
2206        spin_lock_irq(&pool->lock);
2207
2208        /* am I supposed to die? */
2209        if (unlikely(worker->flags & WORKER_DIE)) {
2210                spin_unlock_irq(&pool->lock);
2211                WARN_ON_ONCE(!list_empty(&worker->entry));
2212                worker->task->flags &= ~PF_WQ_WORKER;
2213
2214                set_task_comm(worker->task, "kworker/dying");
2215                ida_simple_remove(&pool->worker_ida, worker->id);
2216                worker_detach_from_pool(worker, pool);
2217                kfree(worker);
2218                return 0;
2219        }
2220
2221        worker_leave_idle(worker);
2222recheck:
2223        /* no more worker necessary? */
2224        if (!need_more_worker(pool))
2225                goto sleep;
2226
2227        /* do we need to manage? */
2228        if (unlikely(!may_start_working(pool)) && manage_workers(worker))
2229                goto recheck;
2230
2231        /*
2232         * ->scheduled list can only be filled while a worker is
2233         * preparing to process a work or actually processing it.
2234         * Make sure nobody diddled with it while I was sleeping.
2235         */
2236        WARN_ON_ONCE(!list_empty(&worker->scheduled));
2237
2238        /*
2239         * Finish PREP stage.  We're guaranteed to have at least one idle
2240         * worker or that someone else has already assumed the manager
2241         * role.  This is where @worker starts participating in concurrency
2242         * management if applicable and concurrency management is restored
2243         * after being rebound.  See rebind_workers() for details.
2244         */
2245        worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
2246
2247        do {
2248                struct work_struct *work =
2249                        list_first_entry(&pool->worklist,
2250                                         struct work_struct, entry);
2251
2252                pool->watchdog_ts = jiffies;
2253
2254                if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
2255                        /* optimization path, not strictly necessary */
2256                        process_one_work(worker, work);
2257                        if (unlikely(!list_empty(&worker->scheduled)))
2258                                process_scheduled_works(worker);
2259                } else {
2260                        move_linked_works(work, &worker->scheduled, NULL);
2261                        process_scheduled_works(worker);
2262                }
2263        } while (keep_working(pool));
2264
2265        worker_set_flags(worker, WORKER_PREP);
2266sleep:
2267        /*
2268         * pool->lock is held and there's no work to process and no need to
2269         * manage, sleep.  Workers are woken up only while holding
2270         * pool->lock or from local cpu, so setting the current state
2271         * before releasing pool->lock is enough to prevent losing any
2272         * event.
2273         */
2274        worker_enter_idle(worker);
2275        __set_current_state(TASK_INTERRUPTIBLE);
2276        spin_unlock_irq(&pool->lock);
2277        schedule();
2278        goto woke_up;
2279}
2280
2281/**
2282 * rescuer_thread - the rescuer thread function
2283 * @__rescuer: self
2284 *
2285 * Workqueue rescuer thread function.  There's one rescuer for each
2286 * workqueue which has WQ_MEM_RECLAIM set.
2287 *
2288 * Regular work processing on a pool may block trying to create a new
2289 * worker which uses GFP_KERNEL allocation which has slight chance of
2290 * developing into deadlock if some works currently on the same queue
2291 * need to be processed to satisfy the GFP_KERNEL allocation.  This is
2292 * the problem rescuer solves.
2293 *
2294 * When such condition is possible, the pool summons rescuers of all
2295 * workqueues which have works queued on the pool and let them process
2296 * those works so that forward progress can be guaranteed.
2297 *
2298 * This should happen rarely.
2299 *
2300 * Return: 0
2301 */
2302static int rescuer_thread(void *__rescuer)
2303{
2304        struct worker *rescuer = __rescuer;
2305        struct workqueue_struct *wq = rescuer->rescue_wq;
2306        struct list_head *scheduled = &rescuer->scheduled;
2307        bool should_stop;
2308
2309        set_user_nice(current, RESCUER_NICE_LEVEL);
2310
2311        /*
2312         * Mark rescuer as worker too.  As WORKER_PREP is never cleared, it
2313         * doesn't participate in concurrency management.
2314         */
2315        rescuer->task->flags |= PF_WQ_WORKER;
2316repeat:
2317        set_current_state(TASK_INTERRUPTIBLE);
2318
2319        /*
2320         * By the time the rescuer is requested to stop, the workqueue
2321         * shouldn't have any work pending, but @wq->maydays may still have
2322         * pwq(s) queued.  This can happen by non-rescuer workers consuming
2323         * all the work items before the rescuer got to them.  Go through
2324         * @wq->maydays processing before acting on should_stop so that the
2325         * list is always empty on exit.
2326         */
2327        should_stop = kthread_should_stop();
2328
2329        /* see whether any pwq is asking for help */
2330        spin_lock_irq(&wq_mayday_lock);
2331
2332        while (!list_empty(&wq->maydays)) {
2333                struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
2334                                        struct pool_workqueue, mayday_node);
2335                struct worker_pool *pool = pwq->pool;
2336                struct work_struct *work, *n;
2337                bool first = true;
2338
2339                __set_current_state(TASK_RUNNING);
2340                list_del_init(&pwq->mayday_node);
2341
2342                spin_unlock_irq(&wq_mayday_lock);
2343
2344                worker_attach_to_pool(rescuer, pool);
2345
2346                spin_lock_irq(&pool->lock);
2347                rescuer->pool = pool;
2348
2349                /*
2350                 * Slurp in all works issued via this workqueue and
2351                 * process'em.
2352                 */
2353                WARN_ON_ONCE(!list_empty(scheduled));
2354                list_for_each_entry_safe(work, n, &pool->worklist, entry) {
2355                        if (get_work_pwq(work) == pwq) {
2356                                if (first)
2357                                        pool->watchdog_ts = jiffies;
2358                                move_linked_works(work, scheduled, &n);
2359                        }
2360                        first = false;
2361                }
2362
2363                if (!list_empty(scheduled)) {
2364                        process_scheduled_works(rescuer);
2365
2366                        /*
2367                         * The above execution of rescued work items could
2368                         * have created more to rescue through
2369                         * pwq_activate_first_delayed() or chained
2370                         * queueing.  Let's put @pwq back on mayday list so
2371                         * that such back-to-back work items, which may be
2372                         * being used to relieve memory pressure, don't
2373                         * incur MAYDAY_INTERVAL delay inbetween.
2374                         */
2375                        if (need_to_create_worker(pool)) {
2376                                spin_lock(&wq_mayday_lock);
2377                                get_pwq(pwq);
2378                                list_move_tail(&pwq->mayday_node, &wq->maydays);
2379                                spin_unlock(&wq_mayday_lock);
2380                        }
2381                }
2382
2383                /*
2384                 * Put the reference grabbed by send_mayday().  @pool won't
2385                 * go away while we're still attached to it.
2386                 */
2387                put_pwq(pwq);
2388
2389                /*
2390                 * Leave this pool.  If need_more_worker() is %true, notify a
2391                 * regular worker; otherwise, we end up with 0 concurrency
2392                 * and stalling the execution.
2393                 */
2394                if (need_more_worker(pool))
2395                        wake_up_worker(pool);
2396
2397                rescuer->pool = NULL;
2398                spin_unlock_irq(&pool->lock);
2399
2400                worker_detach_from_pool(rescuer, pool);
2401
2402                spin_lock_irq(&wq_mayday_lock);
2403        }
2404
2405        spin_unlock_irq(&wq_mayday_lock);
2406
2407        if (should_stop) {
2408                __set_current_state(TASK_RUNNING);
2409                rescuer->task->flags &= ~PF_WQ_WORKER;
2410                return 0;
2411        }
2412
2413        /* rescuers should never participate in concurrency management */
2414        WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
2415        schedule();
2416        goto repeat;
2417}
2418
2419/**
2420 * check_flush_dependency - check for flush dependency sanity
2421 * @target_wq: workqueue being flushed
2422 * @target_work: work item being flushed (NULL for workqueue flushes)
2423 *
2424 * %current is trying to flush the whole @target_wq or @target_work on it.
2425 * If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not
2426 * reclaiming memory or running on a workqueue which doesn't have
2427 * %WQ_MEM_RECLAIM as that can break forward-progress guarantee leading to
2428 * a deadlock.
2429 */
2430static void check_flush_dependency(struct workqueue_struct *target_wq,
2431                                   struct work_struct *target_work)
2432{
2433        work_func_t target_func = target_work ? target_work->func : NULL;
2434        struct worker *worker;
2435
2436        if (target_wq->flags & WQ_MEM_RECLAIM)
2437                return;
2438
2439        worker = current_wq_worker();
2440
2441        WARN_ONCE(current->flags & PF_MEMALLOC,
2442                  "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%pf",
2443                  current->pid, current->comm, target_wq->name, target_func);
2444        WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
2445                              (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM),
2446                  "workqueue: WQ_MEM_RECLAIM %s:%pf is flushing !WQ_MEM_RECLAIM %s:%pf",
2447                  worker->current_pwq->wq->name, worker->current_func,
2448                  target_wq->name, target_func);
2449}
2450
2451struct wq_barrier {
2452        struct work_struct      work;
2453        struct completion       done;
2454        struct task_struct      *task;  /* purely informational */
2455};
2456
2457static void wq_barrier_func(struct work_struct *work)
2458{
2459        struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
2460        complete(&barr->done);
2461}
2462
2463/**
2464 * insert_wq_barrier - insert a barrier work
2465 * @pwq: pwq to insert barrier into
2466 * @barr: wq_barrier to insert
2467 * @target: target work to attach @barr to
2468 * @worker: worker currently executing @target, NULL if @target is not executing
2469 *
2470 * @barr is linked to @target such that @barr is completed only after
2471 * @target finishes execution.  Please note that the ordering
2472 * guarantee is observed only with respect to @target and on the local
2473 * cpu.
2474 *
2475 * Currently, a queued barrier can't be canceled.  This is because
2476 * try_to_grab_pending() can't determine whether the work to be
2477 * grabbed is at the head of the queue and thus can't clear LINKED
2478 * flag of the previous work while there must be a valid next work
2479 * after a work with LINKED flag set.
2480 *
2481 * Note that when @worker is non-NULL, @target may be modified
2482 * underneath us, so we can't reliably determine pwq from @target.
2483 *
2484 * CONTEXT:
2485 * spin_lock_irq(pool->lock).
2486 */
2487static void insert_wq_barrier(struct pool_workqueue *pwq,
2488                              struct wq_barrier *barr,
2489                              struct work_struct *target, struct worker *worker)
2490{
2491        struct list_head *head;
2492        unsigned int linked = 0;
2493
2494        /*
2495         * debugobject calls are safe here even with pool->lock locked
2496         * as we know for sure that this will not trigger any of the
2497         * checks and call back into the fixup functions where we
2498         * might deadlock.
2499         */
2500        INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
2501        __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
2502        init_completion(&barr->done);
2503        barr->task = current;
2504
2505        /*
2506         * If @target is currently being executed, schedule the
2507         * barrier to the worker; otherwise, put it after @target.
2508         */
2509        if (worker)
2510                head = worker->scheduled.next;
2511        else {
2512                unsigned long *bits = work_data_bits(target);
2513
2514                head = target->entry.next;
2515                /* there can already be other linked works, inherit and set */
2516                linked = *bits & WORK_STRUCT_LINKED;
2517                __set_bit(WORK_STRUCT_LINKED_BIT, bits);
2518        }
2519
2520        debug_work_activate(&barr->work);
2521        insert_work(pwq, &barr->work, head,
2522                    work_color_to_flags(WORK_NO_COLOR) | linked);
2523}
2524
2525/**
2526 * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
2527 * @wq: workqueue being flushed
2528 * @flush_color: new flush color, < 0 for no-op
2529 * @work_color: new work color, < 0 for no-op
2530 *
2531 * Prepare pwqs for workqueue flushing.
2532 *
2533 * If @flush_color is non-negative, flush_color on all pwqs should be
2534 * -1.  If no pwq has in-flight commands at the specified color, all
2535 * pwq->flush_color's stay at -1 and %false is returned.  If any pwq
2536 * has in flight commands, its pwq->flush_color is set to
2537 * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
2538 * wakeup logic is armed and %true is returned.
2539 *
2540 * The caller should have initialized @wq->first_flusher prior to
2541 * calling this function with non-negative @flush_color.  If
2542 * @flush_color is negative, no flush color update is done and %false
2543 * is returned.
2544 *
2545 * If @work_color is non-negative, all pwqs should have the same
2546 * work_color which is previous to @work_color and all will be
2547 * advanced to @work_color.
2548 *
2549 * CONTEXT:
2550 * mutex_lock(wq->mutex).
2551 *
2552 * Return:
2553 * %true if @flush_color >= 0 and there's something to flush.  %false
2554 * otherwise.
2555 */
2556static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
2557                                      int flush_color, int work_color)
2558{
2559        bool wait = false;
2560        struct pool_workqueue *pwq;
2561
2562        if (flush_color >= 0) {
2563                WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
2564                atomic_set(&wq->nr_pwqs_to_flush, 1);
2565        }
2566
2567        for_each_pwq(pwq, wq) {
2568                struct worker_pool *pool = pwq->pool;
2569
2570                spin_lock_irq(&pool->lock);
2571
2572                if (flush_color >= 0) {
2573                        WARN_ON_ONCE(pwq->flush_color != -1);
2574
2575                        if (pwq->nr_in_flight[flush_color]) {
2576                                pwq->flush_color = flush_color;
2577                                atomic_inc(&wq->nr_pwqs_to_flush);
2578                                wait = true;
2579                        }
2580                }
2581
2582                if (work_color >= 0) {
2583                        WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
2584                        pwq->work_color = work_color;
2585                }
2586
2587                spin_unlock_irq(&pool->lock);
2588        }
2589
2590        if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
2591                complete(&wq->first_flusher->done);
2592
2593        return wait;
2594}
2595
2596/**
2597 * flush_workqueue - ensure that any scheduled work has run to completion.
2598 * @wq: workqueue to flush
2599 *
2600 * This function sleeps until all work items which were queued on entry
2601 * have finished execution, but it is not livelocked by new incoming ones.
2602 */
2603void flush_workqueue(struct workqueue_struct *wq)
2604{
2605        struct wq_flusher this_flusher = {
2606                .list = LIST_HEAD_INIT(this_flusher.list),
2607                .flush_color = -1,
2608                .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
2609        };
2610        int next_color;
2611
2612        lock_map_acquire(&wq->lockdep_map);
2613        lock_map_release(&wq->lockdep_map);
2614
2615        mutex_lock(&wq->mutex);
2616
2617        /*
2618         * Start-to-wait phase
2619         */
2620        next_color = work_next_color(wq->work_color);
2621
2622        if (next_color != wq->flush_color) {
2623                /*
2624                 * Color space is not full.  The current work_color
2625                 * becomes our flush_color and work_color is advanced
2626                 * by one.
2627                 */
2628                WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
2629                this_flusher.flush_color = wq->work_color;
2630                wq->work_color = next_color;
2631
2632                if (!wq->first_flusher) {
2633                        /* no flush in progress, become the first flusher */
2634                        WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
2635
2636                        wq->first_flusher = &this_flusher;
2637
2638                        if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
2639                                                       wq->work_color)) {
2640                                /* nothing to flush, done */
2641                                wq->flush_color = next_color;
2642                                wq->first_flusher = NULL;
2643                                goto out_unlock;
2644                        }
2645                } else {
2646                        /* wait in queue */
2647                        WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
2648                        list_add_tail(&this_flusher.list, &wq->flusher_queue);
2649                        flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
2650                }
2651        } else {
2652                /*
2653                 * Oops, color space is full, wait on overflow queue.
2654                 * The next flush completion will assign us
2655                 * flush_color and transfer to flusher_queue.
2656                 */
2657                list_add_tail(&this_flusher.list, &wq->flusher_overflow);
2658        }
2659
2660        check_flush_dependency(wq, NULL);
2661
2662        mutex_unlock(&wq->mutex);
2663
2664        wait_for_completion(&this_flusher.done);
2665
2666        /*
2667         * Wake-up-and-cascade phase
2668         *
2669         * First flushers are responsible for cascading flushes and
2670         * handling overflow.  Non-first flushers can simply return.
2671         */
2672        if (wq->first_flusher != &this_flusher)
2673                return;
2674
2675        mutex_lock(&wq->mutex);
2676
2677        /* we might have raced, check again with mutex held */
2678        if (wq->first_flusher != &this_flusher)
2679                goto out_unlock;
2680
2681        wq->first_flusher = NULL;
2682
2683        WARN_ON_ONCE(!list_empty(&this_flusher.list));
2684        WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
2685
2686        while (true) {
2687                struct wq_flusher *next, *tmp;
2688
2689                /* complete all the flushers sharing the current flush color */
2690                list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
2691                        if (next->flush_color != wq->flush_color)
2692                                break;
2693                        list_del_init(&next->list);
2694                        complete(&next->done);
2695                }
2696
2697                WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
2698                             wq->flush_color != work_next_color(wq->work_color));
2699
2700                /* this flush_color is finished, advance by one */
2701                wq->flush_color = work_next_color(wq->flush_color);
2702
2703                /* one color has been freed, handle overflow queue */
2704                if (!list_empty(&wq->flusher_overflow)) {
2705                        /*
2706                         * Assign the same color to all overflowed
2707                         * flushers, advance work_color and append to
2708                         * flusher_queue.  This is the start-to-wait
2709                         * phase for these overflowed flushers.
2710                         */
2711                        list_for_each_entry(tmp, &wq->flusher_overflow, list)
2712                                tmp->flush_color = wq->work_color;
2713
2714                        wq->work_color = work_next_color(wq->work_color);
2715
2716                        list_splice_tail_init(&wq->flusher_overflow,
2717                                              &wq->flusher_queue);
2718                        flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
2719                }
2720
2721                if (list_empty(&wq->flusher_queue)) {
2722                        WARN_ON_ONCE(wq->flush_color != wq->work_color);
2723                        break;
2724                }
2725
2726                /*
2727                 * Need to flush more colors.  Make the next flusher
2728                 * the new first flusher and arm pwqs.
2729                 */
2730                WARN_ON_ONCE(wq->flush_color == wq->work_color);
2731                WARN_ON_ONCE(wq->flush_color != next->flush_color);
2732
2733                list_del_init(&next->list);
2734                wq->first_flusher = next;
2735
2736                if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
2737                        break;
2738
2739                /*
2740                 * Meh... this color is already done, clear first
2741                 * flusher and repeat cascading.
2742                 */
2743                wq->first_flusher = NULL;
2744        }
2745
2746out_unlock:
2747        mutex_unlock(&wq->mutex);
2748}
2749EXPORT_SYMBOL(flush_workqueue);
2750
2751/**
2752 * drain_workqueue - drain a workqueue
2753 * @wq: workqueue to drain
2754 *
2755 * Wait until the workqueue becomes empty.  While draining is in progress,
2756 * only chain queueing is allowed.  IOW, only currently pending or running
2757 * work items on @wq can queue further work items on it.  @wq is flushed
2758 * repeatedly until it becomes empty.  The number of flushing is determined
2759 * by the depth of chaining and should be relatively short.  Whine if it
2760 * takes too long.
2761 */
2762void drain_workqueue(struct workqueue_struct *wq)
2763{
2764        unsigned int flush_cnt = 0;
2765        struct pool_workqueue *pwq;
2766
2767        /*
2768         * __queue_work() needs to test whether there are drainers, is much
2769         * hotter than drain_workqueue() and already looks at @wq->flags.
2770         * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers.
2771         */
2772        mutex_lock(&wq->mutex);
2773        if (!wq->nr_drainers++)
2774                wq->flags |= __WQ_DRAINING;
2775        mutex_unlock(&wq->mutex);
2776reflush:
2777        flush_workqueue(wq);
2778
2779        mutex_lock(&wq->mutex);
2780
2781        for_each_pwq(pwq, wq) {
2782                bool drained;
2783
2784                spin_lock_irq(&pwq->pool->lock);
2785                drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
2786                spin_unlock_irq(&pwq->pool->lock);
2787
2788                if (drained)
2789                        continue;
2790
2791                if (++flush_cnt == 10 ||
2792                    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
2793                        pr_warn("workqueue %s: drain_workqueue() isn't complete after %u tries\n",
2794                                wq->name, flush_cnt);
2795
2796                mutex_unlock(&wq->mutex);
2797                goto reflush;
2798        }
2799
2800        if (!--wq->nr_drainers)
2801                wq->flags &= ~__WQ_DRAINING;
2802        mutex_unlock(&wq->mutex);
2803}
2804EXPORT_SYMBOL_GPL(drain_workqueue);
2805
2806static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
2807{
2808        struct worker *worker = NULL;
2809        struct worker_pool *pool;
2810        struct pool_workqueue *pwq;
2811
2812        might_sleep();
2813
2814        local_irq_disable();
2815        pool = get_work_pool(work);
2816        if (!pool) {
2817                local_irq_enable();
2818                return false;
2819        }
2820
2821        spin_lock(&pool->lock);
2822        /* see the comment in try_to_grab_pending() with the same code */
2823        pwq = get_work_pwq(work);
2824        if (pwq) {
2825                if (unlikely(pwq->pool != pool))
2826                        goto already_gone;
2827        } else {
2828                worker = find_worker_executing_work(pool, work);
2829                if (!worker)
2830                        goto already_gone;
2831                pwq = worker->current_pwq;
2832        }
2833
2834        check_flush_dependency(pwq->wq, work);
2835
2836        insert_wq_barrier(pwq, barr, work, worker);
2837        spin_unlock_irq(&pool->lock);
2838
2839        /*
2840         * If @max_active is 1 or rescuer is in use, flushing another work
2841         * item on the same workqueue may lead to deadlock.  Make sure the
2842         * flusher is not running on the same workqueue by verifying write
2843         * access.
2844         */
2845        if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)
2846                lock_map_acquire(&pwq->wq->lockdep_map);
2847        else
2848                lock_map_acquire_read(&pwq->wq->lockdep_map);
2849        lock_map_release(&pwq->wq->lockdep_map);
2850
2851        return true;
2852already_gone:
2853        spin_unlock_irq(&pool->lock);
2854        return false;
2855}
2856
2857/**
2858 * flush_work - wait for a work to finish executing the last queueing instance
2859 * @work: the work to flush
2860 *
2861 * Wait until @work has finished execution.  @work is guaranteed to be idle
2862 * on return if it hasn't been requeued since flush started.
2863 *
2864 * Return:
2865 * %true if flush_work() waited for the work to finish execution,
2866 * %false if it was already idle.
2867 */
2868bool flush_work(struct work_struct *work)
2869{
2870        struct wq_barrier barr;
2871
2872        lock_map_acquire(&work->lockdep_map);
2873        lock_map_release(&work->lockdep_map);
2874
2875        if (start_flush_work(work, &barr)) {
2876                wait_for_completion(&barr.done);
2877                destroy_work_on_stack(&barr.work);
2878                return true;
2879        } else {
2880                return false;
2881        }
2882}
2883EXPORT_SYMBOL_GPL(flush_work);
2884
2885struct cwt_wait {
2886        wait_queue_t            wait;
2887        struct work_struct      *work;
2888};
2889
2890static int cwt_wakefn(wait_queue_t *wait, unsigned mode, int sync, void *key)
2891{
2892        struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
2893
2894        if (cwait->work != key)
2895                return 0;
2896        return autoremove_wake_function(wait, mode, sync, key);
2897}
2898
2899static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
2900{
2901        static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
2902        unsigned long flags;
2903        int ret;
2904
2905        do {
2906                ret = try_to_grab_pending(work, is_dwork, &flags);
2907                /*
2908                 * If someone else is already canceling, wait for it to
2909                 * finish.  flush_work() doesn't work for PREEMPT_NONE
2910                 * because we may get scheduled between @work's completion
2911                 * and the other canceling task resuming and clearing
2912                 * CANCELING - flush_work() will return false immediately
2913                 * as @work is no longer busy, try_to_grab_pending() will
2914                 * return -ENOENT as @work is still being canceled and the
2915                 * other canceling task won't be able to clear CANCELING as
2916                 * we're hogging the CPU.
2917                 *
2918                 * Let's wait for completion using a waitqueue.  As this
2919                 * may lead to the thundering herd problem, use a custom
2920                 * wake function which matches @work along with exclusive
2921                 * wait and wakeup.
2922                 */
2923                if (unlikely(ret == -ENOENT)) {
2924                        struct cwt_wait cwait;
2925
2926                        init_wait(&cwait.wait);
2927                        cwait.wait.func = cwt_wakefn;
2928                        cwait.work = work;
2929
2930                        prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
2931                                                  TASK_UNINTERRUPTIBLE);
2932                        if (work_is_canceling(work))
2933                                schedule();
2934                        finish_wait(&cancel_waitq, &cwait.wait);
2935                }
2936        } while (unlikely(ret < 0));
2937
2938        /* tell other tasks trying to grab @work to back off */
2939        mark_work_canceling(work);
2940        local_irq_restore(flags);
2941
2942        flush_work(work);
2943        clear_work_data(work);
2944
2945        /*
2946         * Paired with prepare_to_wait() above so that either
2947         * waitqueue_active() is visible here or !work_is_canceling() is
2948         * visible there.
2949         */
2950        smp_mb();
2951        if (waitqueue_active(&cancel_waitq))
2952                __wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
2953
2954        return ret;
2955}
2956
2957/**
2958 * cancel_work_sync - cancel a work and wait for it to finish
2959 * @work: the work to cancel
2960 *
2961 * Cancel @work and wait for its execution to finish.  This function
2962 * can be used even if the work re-queues itself or migrates to
2963 * another workqueue.  On return from this function, @work is
2964 * guaranteed to be not pending or executing on any CPU.
2965 *
2966 * cancel_work_sync(&delayed_work->work) must not be used for
2967 * delayed_work's.  Use cancel_delayed_work_sync() instead.
2968 *
2969 * The caller must ensure that the workqueue on which @work was last
2970 * queued can't be destroyed before this function returns.
2971 *
2972 * Return:
2973 * %true if @work was pending, %false otherwise.
2974 */
2975bool cancel_work_sync(struct work_struct *work)
2976{
2977        return __cancel_work_timer(work, false);
2978}
2979EXPORT_SYMBOL_GPL(cancel_work_sync);
2980
2981/**
2982 * flush_delayed_work - wait for a dwork to finish executing the last queueing
2983 * @dwork: the delayed work to flush
2984 *
2985 * Delayed timer is cancelled and the pending work is queued for
2986 * immediate execution.  Like flush_work(), this function only
2987 * considers the last queueing instance of @dwork.
2988 *
2989 * Return:
2990 * %true if flush_work() waited for the work to finish execution,
2991 * %false if it was already idle.
2992 */
2993bool flush_delayed_work(struct delayed_work *dwork)
2994{
2995        local_irq_disable();
2996        if (del_timer_sync(&dwork->timer))
2997                __queue_work(dwork->cpu, dwork->wq, &dwork->work);
2998        local_irq_enable();
2999        return flush_work(&dwork->work);
3000}
3001EXPORT_SYMBOL(flush_delayed_work);
3002
3003/**
3004 * cancel_delayed_work - cancel a delayed work
3005 * @dwork: delayed_work to cancel
3006 *
3007 * Kill off a pending delayed_work.
3008 *
3009 * Return: %true if @dwork was pending and canceled; %false if it wasn't
3010 * pending.
3011 *
3012 * Note:
3013 * The work callback function may still be running on return, unless
3014 * it returns %true and the work doesn't re-arm itself.  Explicitly flush or
3015 * use cancel_delayed_work_sync() to wait on it.
3016 *
3017 * This function is safe to call from any context including IRQ handler.
3018 */
3019bool cancel_delayed_work(struct delayed_work *dwork)
3020{
3021        unsigned long flags;
3022        int ret;
3023
3024        do {
3025                ret = try_to_grab_pending(&dwork->work, true, &flags);
3026        } while (unlikely(ret == -EAGAIN));
3027
3028        if (unlikely(ret < 0))
3029                return false;
3030
3031        set_work_pool_and_clear_pending(&dwork->work,
3032                                        get_work_pool_id(&dwork->work));
3033        local_irq_restore(flags);
3034        return ret;
3035}
3036EXPORT_SYMBOL(cancel_delayed_work);
3037
3038/**
3039 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
3040 * @dwork: the delayed work cancel
3041 *
3042 * This is cancel_work_sync() for delayed works.
3043 *
3044 * Return:
3045 * %true if @dwork was pending, %false otherwise.
3046 */
3047bool cancel_delayed_work_sync(struct delayed_work *dwork)
3048{
3049        return __cancel_work_timer(&dwork->work, true);
3050}
3051EXPORT_SYMBOL(cancel_delayed_work_sync);
3052
3053/**
3054 * schedule_on_each_cpu - execute a function synchronously on each online CPU
3055 * @func: the function to call
3056 *
3057 * schedule_on_each_cpu() executes @func on each online CPU using the
3058 * system workqueue and blocks until all CPUs have completed.
3059 * schedule_on_each_cpu() is very slow.
3060 *
3061 * Return:
3062 * 0 on success, -errno on failure.
3063 */
3064int schedule_on_each_cpu(work_func_t func)
3065{
3066        int cpu;
3067        struct work_struct __percpu *works;
3068
3069        works = alloc_percpu(struct work_struct);
3070        if (!works)
3071                return -ENOMEM;
3072
3073        get_online_cpus();
3074
3075        for_each_online_cpu(cpu) {
3076                struct work_struct *work = per_cpu_ptr(works, cpu);
3077
3078                INIT_WORK(work, func);
3079                schedule_work_on(cpu, work);
3080        }
3081
3082        for_each_online_cpu(cpu)
3083                flush_work(per_cpu_ptr(works, cpu));
3084
3085        put_online_cpus();
3086        free_percpu(works);
3087        return 0;
3088}
3089
3090/**
3091 * execute_in_process_context - reliably execute the routine with user context
3092 * @fn:         the function to execute
3093 * @ew:         guaranteed storage for the execute work structure (must
3094 *              be available when the work executes)
3095 *
3096 * Executes the function immediately if process context is available,
3097 * otherwise schedules the function for delayed execution.
3098 *
3099 * Return:      0 - function was executed
3100 *              1 - function was scheduled for execution
3101 */
3102int execute_in_process_context(work_func_t fn, struct execute_work *ew)
3103{
3104        if (!in_interrupt()) {
3105                fn(&ew->work);
3106                return 0;
3107        }
3108
3109        INIT_WORK(&ew->work, fn);
3110        schedule_work(&ew->work);
3111
3112        return 1;
3113}
3114EXPORT_SYMBOL_GPL(execute_in_process_context);
3115
3116/**
3117 * free_workqueue_attrs - free a workqueue_attrs
3118 * @attrs: workqueue_attrs to free
3119 *
3120 * Undo alloc_workqueue_attrs().
3121 */
3122void free_workqueue_attrs(struct workqueue_attrs *attrs)
3123{
3124        if (attrs) {
3125                free_cpumask_var(attrs->cpumask);
3126                kfree(attrs);
3127        }
3128}
3129
3130/**
3131 * alloc_workqueue_attrs - allocate a workqueue_attrs
3132 * @gfp_mask: allocation mask to use
3133 *
3134 * Allocate a new workqueue_attrs, initialize with default settings and
3135 * return it.
3136 *
3137 * Return: The allocated new workqueue_attr on success. %NULL on failure.
3138 */
3139struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask)
3140{
3141        struct workqueue_attrs *attrs;
3142
3143        attrs = kzalloc(sizeof(*attrs), gfp_mask);
3144        if (!attrs)
3145                goto fail;
3146        if (!alloc_cpumask_var(&attrs->cpumask, gfp_mask))
3147                goto fail;
3148
3149        cpumask_copy(attrs->cpumask, cpu_possible_mask);
3150        return attrs;
3151fail:
3152        free_workqueue_attrs(attrs);
3153        return NULL;
3154}
3155
3156static void copy_workqueue_attrs(struct workqueue_attrs *to,
3157                                 const struct workqueue_attrs *from)
3158{
3159        to->nice = from->nice;
3160        cpumask_copy(to->cpumask, from->cpumask);
3161        /*
3162         * Unlike hash and equality test, this function doesn't ignore
3163         * ->no_numa as it is used for both pool and wq attrs.  Instead,
3164         * get_unbound_pool() explicitly clears ->no_numa after copying.
3165         */
3166        to->no_numa = from->no_numa;
3167}
3168
3169/* hash value of the content of @attr */
3170static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
3171{
3172        u32 hash = 0;
3173
3174        hash = jhash_1word(attrs->nice, hash);
3175        hash = jhash(cpumask_bits(attrs->cpumask),
3176                     BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
3177        return hash;
3178}
3179
3180/* content equality test */
3181static bool wqattrs_equal(const struct workqueue_attrs *a,
3182                          const struct workqueue_attrs *b)
3183{
3184        if (a->nice != b->nice)
3185                return false;
3186        if (!cpumask_equal(a->cpumask, b->cpumask))
3187                return false;
3188        return true;
3189}
3190
3191/**
3192 * init_worker_pool - initialize a newly zalloc'd worker_pool
3193 * @pool: worker_pool to initialize
3194 *
3195 * Initialize a newly zalloc'd @pool.  It also allocates @pool->attrs.
3196 *
3197 * Return: 0 on success, -errno on failure.  Even on failure, all fields
3198 * inside @pool proper are initialized and put_unbound_pool() can be called
3199 * on @pool safely to release it.
3200 */
3201static int init_worker_pool(struct worker_pool *pool)
3202{
3203        spin_lock_init(&pool->lock);
3204        pool->id = -1;
3205        pool->cpu = -1;
3206        pool->node = NUMA_NO_NODE;
3207        pool->flags |= POOL_DISASSOCIATED;
3208        pool->watchdog_ts = jiffies;
3209        INIT_LIST_HEAD(&pool->worklist);
3210        INIT_LIST_HEAD(&pool->idle_list);
3211        hash_init(pool->busy_hash);
3212
3213        init_timer_deferrable(&pool->idle_timer);
3214        pool->idle_timer.function = idle_worker_timeout;
3215        pool->idle_timer.data = (unsigned long)pool;
3216
3217        setup_timer(&pool->mayday_timer, pool_mayday_timeout,
3218                    (unsigned long)pool);
3219
3220        mutex_init(&pool->manager_arb);
3221        mutex_init(&pool->attach_mutex);
3222        INIT_LIST_HEAD(&pool->workers);
3223
3224        ida_init(&pool->worker_ida);
3225        INIT_HLIST_NODE(&pool->hash_node);
3226        pool->refcnt = 1;
3227
3228        /* shouldn't fail above this point */
3229        pool->attrs = alloc_workqueue_attrs(GFP_KERNEL);
3230        if (!pool->attrs)
3231                return -ENOMEM;
3232        return 0;
3233}
3234
3235static void rcu_free_wq(struct rcu_head *rcu)
3236{
3237        struct workqueue_struct *wq =
3238                container_of(rcu, struct workqueue_struct, rcu);
3239
3240        if (!(wq->flags & WQ_UNBOUND))
3241                free_percpu(wq->cpu_pwqs);
3242        else
3243                free_workqueue_attrs(wq->unbound_attrs);
3244
3245        kfree(wq->rescuer);
3246        kfree(wq);
3247}
3248
3249static void rcu_free_pool(struct rcu_head *rcu)
3250{
3251        struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
3252
3253        ida_destroy(&pool->worker_ida);
3254        free_workqueue_attrs(pool->attrs);
3255        kfree(pool);
3256}
3257
3258/**
3259 * put_unbound_pool - put a worker_pool
3260 * @pool: worker_pool to put
3261 *
3262 * Put @pool.  If its refcnt reaches zero, it gets destroyed in sched-RCU
3263 * safe manner.  get_unbound_pool() calls this function on its failure path
3264 * and this function should be able to release pools which went through,
3265 * successfully or not, init_worker_pool().
3266 *
3267 * Should be called with wq_pool_mutex held.
3268 */
3269static void put_unbound_pool(struct worker_pool *pool)
3270{
3271        DECLARE_COMPLETION_ONSTACK(detach_completion);
3272        struct worker *worker;
3273
3274        lockdep_assert_held(&wq_pool_mutex);
3275
3276        if (--pool->refcnt)
3277                return;
3278
3279        /* sanity checks */
3280        if (WARN_ON(!(pool->cpu < 0)) ||
3281            WARN_ON(!list_empty(&pool->worklist)))
3282                return;
3283
3284        /* release id and unhash */
3285        if (pool->id >= 0)
3286                idr_remove(&worker_pool_idr, pool->id);
3287        hash_del(&pool->hash_node);
3288
3289        /*
3290         * Become the manager and destroy all workers.  Grabbing
3291         * manager_arb prevents @pool's workers from blocking on
3292         * attach_mutex.
3293         */
3294        mutex_lock(&pool->manager_arb);
3295
3296        spin_lock_irq(&pool->lock);
3297        while ((worker = first_idle_worker(pool)))
3298                destroy_worker(worker);
3299        WARN_ON(pool->nr_workers || pool->nr_idle);
3300        spin_unlock_irq(&pool->lock);
3301
3302        mutex_lock(&pool->attach_mutex);
3303        if (!list_empty(&pool->workers))
3304                pool->detach_completion = &detach_completion;
3305        mutex_unlock(&pool->attach_mutex);
3306
3307        if (pool->detach_completion)
3308                wait_for_completion(pool->detach_completion);
3309
3310        mutex_unlock(&pool->manager_arb);
3311
3312        /* shut down the timers */
3313        del_timer_sync(&pool->idle_timer);
3314        del_timer_sync(&pool->mayday_timer);
3315
3316        /* sched-RCU protected to allow dereferences from get_work_pool() */
3317        call_rcu_sched(&pool->rcu, rcu_free_pool);
3318}
3319
3320/**
3321 * get_unbound_pool - get a worker_pool with the specified attributes
3322 * @attrs: the attributes of the worker_pool to get
3323 *
3324 * Obtain a worker_pool which has the same attributes as @attrs, bump the
3325 * reference count and return it.  If there already is a matching
3326 * worker_pool, it will be used; otherwise, this function attempts to
3327 * create a new one.
3328 *
3329 * Should be called with wq_pool_mutex held.
3330 *
3331 * Return: On success, a worker_pool with the same attributes as @attrs.
3332 * On failure, %NULL.
3333 */
3334static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
3335{
3336        u32 hash = wqattrs_hash(attrs);
3337        struct worker_pool *pool;
3338        int node;
3339        int target_node = NUMA_NO_NODE;
3340
3341        lockdep_assert_held(&wq_pool_mutex);
3342
3343        /* do we already have a matching pool? */
3344        hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
3345                if (wqattrs_equal(pool->attrs, attrs)) {
3346                        pool->refcnt++;
3347                        return pool;
3348                }
3349        }
3350
3351        /* if cpumask is contained inside a NUMA node, we belong to that node */
3352        if (wq_numa_enabled) {
3353                for_each_node(node) {
3354                        if (cpumask_subset(attrs->cpumask,
3355                                           wq_numa_possible_cpumask[node])) {
3356                                target_node = node;
3357                                break;
3358                        }
3359                }
3360        }
3361
3362        /* nope, create a new one */
3363        pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, target_node);
3364        if (!pool || init_worker_pool(pool) < 0)
3365                goto fail;
3366
3367        lockdep_set_subclass(&pool->lock, 1);   /* see put_pwq() */
3368        copy_workqueue_attrs(pool->attrs, attrs);
3369        pool->node = target_node;
3370
3371        /*
3372         * no_numa isn't a worker_pool attribute, always clear it.  See
3373         * 'struct workqueue_attrs' comments for detail.
3374         */
3375        pool->attrs->no_numa = false;
3376
3377        if (worker_pool_assign_id(pool) < 0)
3378                goto fail;
3379
3380        /* create and start the initial worker */
3381        if (!create_worker(pool))
3382                goto fail;
3383
3384        /* install */
3385        hash_add(unbound_pool_hash, &pool->hash_node, hash);
3386
3387        return pool;
3388fail:
3389        if (pool)
3390                put_unbound_pool(pool);
3391        return NULL;
3392}
3393
3394static void rcu_free_pwq(struct rcu_head *rcu)
3395{
3396        kmem_cache_free(pwq_cache,
3397                        container_of(rcu, struct pool_workqueue, rcu));
3398}
3399
3400/*
3401 * Scheduled on system_wq by put_pwq() when an unbound pwq hits zero refcnt
3402 * and needs to be destroyed.
3403 */
3404static void pwq_unbound_release_workfn(struct work_struct *work)
3405{
3406        struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
3407                                                  unbound_release_work);
3408        struct workqueue_struct *wq = pwq->wq;
3409        struct worker_pool *pool = pwq->pool;
3410        bool is_last;
3411
3412        if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
3413                return;
3414
3415        mutex_lock(&wq->mutex);
3416        list_del_rcu(&pwq->pwqs_node);
3417        is_last = list_empty(&wq->pwqs);
3418        mutex_unlock(&wq->mutex);
3419
3420        mutex_lock(&wq_pool_mutex);
3421        put_unbound_pool(pool);
3422        mutex_unlock(&wq_pool_mutex);
3423
3424        call_rcu_sched(&pwq->rcu, rcu_free_pwq);
3425
3426        /*
3427         * If we're the last pwq going away, @wq is already dead and no one
3428         * is gonna access it anymore.  Schedule RCU free.
3429         */
3430        if (is_last)
3431                call_rcu_sched(&wq->rcu, rcu_free_wq);
3432}
3433
3434/**
3435 * pwq_adjust_max_active - update a pwq's max_active to the current setting
3436 * @pwq: target pool_workqueue
3437 *
3438 * If @pwq isn't freezing, set @pwq->max_active to the associated
3439 * workqueue's saved_max_active and activate delayed work items
3440 * accordingly.  If @pwq is freezing, clear @pwq->max_active to zero.
3441 */
3442static void pwq_adjust_max_active(struct pool_workqueue *pwq)
3443{
3444        struct workqueue_struct *wq = pwq->wq;
3445        bool freezable = wq->flags & WQ_FREEZABLE;
3446
3447        /* for @wq->saved_max_active */
3448        lockdep_assert_held(&wq->mutex);
3449
3450        /* fast exit for non-freezable wqs */
3451        if (!freezable && pwq->max_active == wq->saved_max_active)
3452                return;
3453
3454        spin_lock_irq(&pwq->pool->lock);
3455
3456        /*
3457         * During [un]freezing, the caller is responsible for ensuring that
3458         * this function is called at least once after @workqueue_freezing
3459         * is updated and visible.
3460         */
3461        if (!freezable || !workqueue_freezing) {
3462                pwq->max_active = wq->saved_max_active;
3463
3464                while (!list_empty(&pwq->delayed_works) &&
3465                       pwq->nr_active < pwq->max_active)
3466                        pwq_activate_first_delayed(pwq);
3467
3468                /*
3469                 * Need to kick a worker after thawed or an unbound wq's
3470                 * max_active is bumped.  It's a slow path.  Do it always.
3471                 */
3472                wake_up_worker(pwq->pool);
3473        } else {
3474                pwq->max_active = 0;
3475        }
3476
3477        spin_unlock_irq(&pwq->pool->lock);
3478}
3479
3480/* initialize newly alloced @pwq which is associated with @wq and @pool */
3481static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
3482                     struct worker_pool *pool)
3483{
3484        BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
3485
3486        memset(pwq, 0, sizeof(*pwq));
3487
3488        pwq->pool = pool;
3489        pwq->wq = wq;
3490        pwq->flush_color = -1;
3491        pwq->refcnt = 1;
3492        INIT_LIST_HEAD(&pwq->delayed_works);
3493        INIT_LIST_HEAD(&pwq->pwqs_node);
3494        INIT_LIST_HEAD(&pwq->mayday_node);
3495        INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
3496}
3497
3498/* sync @pwq with the current state of its associated wq and link it */
3499static void link_pwq(struct pool_workqueue *pwq)
3500{
3501        struct workqueue_struct *wq = pwq->wq;
3502
3503        lockdep_assert_held(&wq->mutex);
3504
3505        /* may be called multiple times, ignore if already linked */
3506        if (!list_empty(&pwq->pwqs_node))
3507                return;
3508
3509        /* set the matching work_color */
3510        pwq->work_color = wq->work_color;
3511
3512        /* sync max_active to the current setting */
3513        pwq_adjust_max_active(pwq);
3514
3515        /* link in @pwq */
3516        list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
3517}
3518
3519/* obtain a pool matching @attr and create a pwq associating the pool and @wq */
3520static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
3521                                        const struct workqueue_attrs *attrs)
3522{
3523        struct worker_pool *pool;
3524        struct pool_workqueue *pwq;
3525
3526        lockdep_assert_held(&wq_pool_mutex);
3527
3528        pool = get_unbound_pool(attrs);
3529        if (!pool)
3530                return NULL;
3531
3532        pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
3533        if (!pwq) {
3534                put_unbound_pool(pool);
3535                return NULL;
3536        }
3537
3538        init_pwq(pwq, wq, pool);
3539        return pwq;
3540}
3541
3542/**
3543 * wq_calc_node_cpumask - calculate a wq_attrs' cpumask for the specified node
3544 * @attrs: the wq_attrs of the default pwq of the target workqueue
3545 * @node: the target NUMA node
3546 * @cpu_going_down: if >= 0, the CPU to consider as offline
3547 * @cpumask: outarg, the resulting cpumask
3548 *
3549 * Calculate the cpumask a workqueue with @attrs should use on @node.  If
3550 * @cpu_going_down is >= 0, that cpu is considered offline during
3551 * calculation.  The result is stored in @cpumask.
3552 *
3553 * If NUMA affinity is not enabled, @attrs->cpumask is always used.  If
3554 * enabled and @node has online CPUs requested by @attrs, the returned
3555 * cpumask is the intersection of the possible CPUs of @node and
3556 * @attrs->cpumask.
3557 *
3558 * The caller is responsible for ensuring that the cpumask of @node stays
3559 * stable.
3560 *
3561 * Return: %true if the resulting @cpumask is different from @attrs->cpumask,
3562 * %false if equal.
3563 */
3564static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
3565                                 int cpu_going_down, cpumask_t *cpumask)
3566{
3567        if (!wq_numa_enabled || attrs->no_numa)
3568                goto use_dfl;
3569
3570        /* does @node have any online CPUs @attrs wants? */
3571        cpumask_and(cpumask, cpumask_of_node(node), attrs->cpumask);
3572        if (cpu_going_down >= 0)
3573                cpumask_clear_cpu(cpu_going_down, cpumask);
3574
3575        if (cpumask_empty(cpumask))
3576                goto use_dfl;
3577
3578        /* yeap, return possible CPUs in @node that @attrs wants */
3579        cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]);
3580        return !cpumask_equal(cpumask, attrs->cpumask);
3581
3582use_dfl:
3583        cpumask_copy(cpumask, attrs->cpumask);
3584        return false;
3585}
3586
3587/* install @pwq into @wq's numa_pwq_tbl[] for @node and return the old pwq */
3588static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
3589                                                   int node,
3590                                                   struct pool_workqueue *pwq)
3591{
3592        struct pool_workqueue *old_pwq;
3593
3594        lockdep_assert_held(&wq_pool_mutex);
3595        lockdep_assert_held(&wq->mutex);
3596
3597        /* link_pwq() can handle duplicate calls */
3598        link_pwq(pwq);
3599
3600        old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
3601        rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
3602        return old_pwq;
3603}
3604
3605/* context to store the prepared attrs & pwqs before applying */
3606struct apply_wqattrs_ctx {
3607        struct workqueue_struct *wq;            /* target workqueue */
3608        struct workqueue_attrs  *attrs;         /* attrs to apply */
3609        struct list_head        list;           /* queued for batching commit */
3610        struct pool_workqueue   *dfl_pwq;
3611        struct pool_workqueue   *pwq_tbl[];
3612};
3613
3614/* free the resources after success or abort */
3615static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx)
3616{
3617        if (ctx) {
3618                int node;
3619
3620                for_each_node(node)
3621                        put_pwq_unlocked(ctx->pwq_tbl[node]);
3622                put_pwq_unlocked(ctx->dfl_pwq);
3623
3624                free_workqueue_attrs(ctx->attrs);
3625
3626                kfree(ctx);
3627        }
3628}
3629
3630/* allocate the attrs and pwqs for later installation */
3631static struct apply_wqattrs_ctx *
3632apply_wqattrs_prepare(struct workqueue_struct *wq,
3633                      const struct workqueue_attrs *attrs)
3634{
3635        struct apply_wqattrs_ctx *ctx;
3636        struct workqueue_attrs *new_attrs, *tmp_attrs;
3637        int node;
3638
3639        lockdep_assert_held(&wq_pool_mutex);
3640
3641        ctx = kzalloc(sizeof(*ctx) + nr_node_ids * sizeof(ctx->pwq_tbl[0]),
3642                      GFP_KERNEL);
3643
3644        new_attrs = alloc_workqueue_attrs(GFP_KERNEL);
3645        tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL);
3646        if (!ctx || !new_attrs || !tmp_attrs)
3647                goto out_free;
3648
3649        /*
3650         * Calculate the attrs of the default pwq.
3651         * If the user configured cpumask doesn't overlap with the
3652         * wq_unbound_cpumask, we fallback to the wq_unbound_cpumask.
3653         */
3654        copy_workqueue_attrs(new_attrs, attrs);
3655        cpumask_and(new_attrs->cpumask, new_attrs->cpumask, wq_unbound_cpumask);
3656        if (unlikely(cpumask_empty(new_attrs->cpumask)))
3657                cpumask_copy(new_attrs->cpumask, wq_unbound_cpumask);
3658
3659        /*
3660         * We may create multiple pwqs with differing cpumasks.  Make a
3661         * copy of @new_attrs which will be modified and used to obtain
3662         * pools.
3663         */
3664        copy_workqueue_attrs(tmp_attrs, new_attrs);
3665
3666        /*
3667         * If something goes wrong during CPU up/down, we'll fall back to
3668         * the default pwq covering whole @attrs->cpumask.  Always create
3669         * it even if we don't use it immediately.
3670         */
3671        ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
3672        if (!ctx->dfl_pwq)
3673                goto out_free;
3674
3675        for_each_node(node) {
3676                if (wq_calc_node_cpumask(new_attrs, node, -1, tmp_attrs->cpumask)) {
3677                        ctx->pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs);
3678                        if (!ctx->pwq_tbl[node])
3679                                goto out_free;
3680                } else {
3681                        ctx->dfl_pwq->refcnt++;
3682                        ctx->pwq_tbl[node] = ctx->dfl_pwq;
3683                }
3684        }
3685
3686        /* save the user configured attrs and sanitize it. */
3687        copy_workqueue_attrs(new_attrs, attrs);
3688        cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
3689        ctx->attrs = new_attrs;
3690
3691        ctx->wq = wq;
3692        free_workqueue_attrs(tmp_attrs);
3693        return ctx;
3694
3695out_free:
3696        free_workqueue_attrs(tmp_attrs);
3697        free_workqueue_attrs(new_attrs);
3698        apply_wqattrs_cleanup(ctx);
3699        return NULL;
3700}
3701
3702/* set attrs and install prepared pwqs, @ctx points to old pwqs on return */
3703static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
3704{
3705        int node;
3706
3707        /* all pwqs have been created successfully, let's install'em */
3708        mutex_lock(&ctx->wq->mutex);
3709
3710        copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);
3711
3712        /* save the previous pwq and install the new one */
3713        for_each_node(node)
3714                ctx->pwq_tbl[node] = numa_pwq_tbl_install(ctx->wq, node,
3715                                                          ctx->pwq_tbl[node]);
3716
3717        /* @dfl_pwq might not have been used, ensure it's linked */
3718        link_pwq(ctx->dfl_pwq);
3719        swap(ctx->wq->dfl_pwq, ctx->dfl_pwq);
3720
3721        mutex_unlock(&ctx->wq->mutex);
3722}
3723
3724static void apply_wqattrs_lock(void)
3725{
3726        /* CPUs should stay stable across pwq creations and installations */
3727        get_online_cpus();
3728        mutex_lock(&wq_pool_mutex);
3729}
3730
3731static void apply_wqattrs_unlock(void)
3732{
3733        mutex_unlock(&wq_pool_mutex);
3734        put_online_cpus();
3735}
3736
3737static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
3738                                        const struct workqueue_attrs *attrs)
3739{
3740        struct apply_wqattrs_ctx *ctx;
3741
3742        /* only unbound workqueues can change attributes */
3743        if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
3744                return -EINVAL;
3745
3746        /* creating multiple pwqs breaks ordering guarantee */
3747        if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
3748                return -EINVAL;
3749
3750        ctx = apply_wqattrs_prepare(wq, attrs);
3751        if (!ctx)
3752                return -ENOMEM;
3753
3754        /* the ctx has been prepared successfully, let's commit it */
3755        apply_wqattrs_commit(ctx);
3756        apply_wqattrs_cleanup(ctx);
3757
3758        return 0;
3759}
3760
3761/**
3762 * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
3763 * @wq: the target workqueue
3764 * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
3765 *
3766 * Apply @attrs to an unbound workqueue @wq.  Unless disabled, on NUMA
3767 * machines, this function maps a separate pwq to each NUMA node with
3768 * possibles CPUs in @attrs->cpumask so that work items are affine to the
3769 * NUMA node it was issued on.  Older pwqs are released as in-flight work
3770 * items finish.  Note that a work item which repeatedly requeues itself
3771 * back-to-back will stay on its current pwq.
3772 *
3773 * Performs GFP_KERNEL allocations.
3774 *
3775 * Return: 0 on success and -errno on failure.
3776 */
3777int apply_workqueue_attrs(struct workqueue_struct *wq,
3778                          const struct workqueue_attrs *attrs)
3779{
3780        int ret;
3781
3782        apply_wqattrs_lock();
3783        ret = apply_workqueue_attrs_locked(wq, attrs);
3784        apply_wqattrs_unlock();
3785
3786        return ret;
3787}
3788
3789/**
3790 * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
3791 * @wq: the target workqueue
3792 * @cpu: the CPU coming up or going down
3793 * @online: whether @cpu is coming up or going down
3794 *
3795 * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
3796 * %CPU_DOWN_FAILED.  @cpu is being hot[un]plugged, update NUMA affinity of
3797 * @wq accordingly.
3798 *
3799 * If NUMA affinity can't be adjusted due to memory allocation failure, it
3800 * falls back to @wq->dfl_pwq which may not be optimal but is always
3801 * correct.
3802 *
3803 * Note that when the last allowed CPU of a NUMA node goes offline for a
3804 * workqueue with a cpumask spanning multiple nodes, the workers which were
3805 * already executing the work items for the workqueue will lose their CPU
3806 * affinity and may execute on any CPU.  This is similar to how per-cpu
3807 * workqueues behave on CPU_DOWN.  If a workqueue user wants strict
3808 * affinity, it's the user's responsibility to flush the work item from
3809 * CPU_DOWN_PREPARE.
3810 */
3811static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
3812                                   bool online)
3813{
3814        int node = cpu_to_node(cpu);
3815        int cpu_off = online ? -1 : cpu;
3816        struct pool_workqueue *old_pwq = NULL, *pwq;
3817        struct workqueue_attrs *target_attrs;
3818        cpumask_t *cpumask;
3819
3820        lockdep_assert_held(&wq_pool_mutex);
3821
3822        if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND) ||
3823            wq->unbound_attrs->no_numa)
3824                return;
3825
3826        /*
3827         * We don't wanna alloc/free wq_attrs for each wq for each CPU.
3828         * Let's use a preallocated one.  The following buf is protected by
3829         * CPU hotplug exclusion.
3830         */
3831        target_attrs = wq_update_unbound_numa_attrs_buf;
3832        cpumask = target_attrs->cpumask;
3833
3834        copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
3835        pwq = unbound_pwq_by_node(wq, node);
3836
3837        /*
3838         * Let's determine what needs to be done.  If the target cpumask is
3839         * different from the default pwq's, we need to compare it to @pwq's
3840         * and create a new one if they don't match.  If the target cpumask
3841         * equals the default pwq's, the default pwq should be used.
3842         */
3843        if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) {
3844                if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask))
3845                        return;
3846        } else {
3847                goto use_dfl_pwq;
3848        }
3849
3850        /* create a new pwq */
3851        pwq = alloc_unbound_pwq(wq, target_attrs);
3852        if (!pwq) {
3853                pr_warn("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n",
3854                        wq->name);
3855                goto use_dfl_pwq;
3856        }
3857
3858        /* Install the new pwq. */
3859        mutex_lock(&wq->mutex);
3860        old_pwq = numa_pwq_tbl_install(wq, node, pwq);
3861        goto out_unlock;
3862
3863use_dfl_pwq:
3864        mutex_lock(&wq->mutex);
3865        spin_lock_irq(&wq->dfl_pwq->pool->lock);
3866        get_pwq(wq->dfl_pwq);
3867        spin_unlock_irq(&wq->dfl_pwq->pool->lock);
3868        old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
3869out_unlock:
3870        mutex_unlock(&wq->mutex);
3871        put_pwq_unlocked(old_pwq);
3872}
3873
3874static int alloc_and_link_pwqs(struct workqueue_struct *wq)
3875{
3876        bool highpri = wq->flags & WQ_HIGHPRI;
3877        int cpu, ret;
3878
3879        if (!(wq->flags & WQ_UNBOUND)) {
3880                wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
3881                if (!wq->cpu_pwqs)
3882                        return -ENOMEM;
3883
3884                for_each_possible_cpu(cpu) {
3885                        struct pool_workqueue *pwq =
3886                                per_cpu_ptr(wq->cpu_pwqs, cpu);
3887                        struct worker_pool *cpu_pools =
3888                                per_cpu(cpu_worker_pools, cpu);
3889
3890                        init_pwq(pwq, wq, &cpu_pools[highpri]);
3891
3892                        mutex_lock(&wq->mutex);
3893                        link_pwq(pwq);
3894                        mutex_unlock(&wq->mutex);
3895                }
3896                return 0;
3897        } else if (wq->flags & __WQ_ORDERED) {
3898                ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
3899                /* there should only be single pwq for ordering guarantee */
3900                WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
3901                              wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
3902                     "ordering guarantee broken for workqueue %s\n", wq->name);
3903                return ret;
3904        } else {
3905                return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
3906        }
3907}
3908
3909static int wq_clamp_max_active(int max_active, unsigned int flags,
3910                               const char *name)
3911{
3912        int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
3913
3914        if (max_active < 1 || max_active > lim)
3915                pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
3916                        max_active, name, 1, lim);
3917
3918        return clamp_val(max_active, 1, lim);
3919}
3920
3921struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
3922                                               unsigned int flags,
3923                                               int max_active,
3924                                               struct lock_class_key *key,
3925                                               const char *lock_name, ...)
3926{
3927        size_t tbl_size = 0;
3928        va_list args;
3929        struct workqueue_struct *wq;
3930        struct pool_workqueue *pwq;
3931
3932        /* see the comment above the definition of WQ_POWER_EFFICIENT */
3933        if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
3934                flags |= WQ_UNBOUND;
3935
3936        /* allocate wq and format name */
3937        if (flags & WQ_UNBOUND)
3938                tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]);
3939
3940        wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL);
3941        if (!wq)
3942                return NULL;
3943
3944        if (flags & WQ_UNBOUND) {
3945                wq->unbound_attrs = alloc_workqueue_attrs(GFP_KERNEL);
3946                if (!wq->unbound_attrs)
3947                        goto err_free_wq;
3948        }
3949
3950        va_start(args, lock_name);
3951        vsnprintf(wq->name, sizeof(wq->name), fmt, args);
3952        va_end(args);
3953
3954        max_active = max_active ?: WQ_DFL_ACTIVE;
3955        max_active = wq_clamp_max_active(max_active, flags, wq->name);
3956
3957        /* init wq */
3958        wq->flags = flags;
3959        wq->saved_max_active = max_active;
3960        mutex_init(&wq->mutex);
3961        atomic_set(&wq->nr_pwqs_to_flush, 0);
3962        INIT_LIST_HEAD(&wq->pwqs);
3963        INIT_LIST_HEAD(&wq->flusher_queue);
3964        INIT_LIST_HEAD(&wq->flusher_overflow);
3965        INIT_LIST_HEAD(&wq->maydays);
3966
3967        lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
3968        INIT_LIST_HEAD(&wq->list);
3969
3970        if (alloc_and_link_pwqs(wq) < 0)
3971                goto err_free_wq;
3972
3973        /*
3974         * Workqueues which may be used during memory reclaim should
3975         * have a rescuer to guarantee forward progress.
3976         */
3977        if (flags & WQ_MEM_RECLAIM) {
3978                struct worker *rescuer;
3979
3980                rescuer = alloc_worker(NUMA_NO_NODE);
3981                if (!rescuer)
3982                        goto err_destroy;
3983
3984                rescuer->rescue_wq = wq;
3985                rescuer->task = kthread_create(rescuer_thread, rescuer, "%s",
3986                                               wq->name);
3987                if (IS_ERR(rescuer->task)) {
3988                        kfree(rescuer);
3989                        goto err_destroy;
3990                }
3991
3992                wq->rescuer = rescuer;
3993                kthread_bind_mask(rescuer->task, cpu_possible_mask);
3994                wake_up_process(rescuer->task);
3995        }
3996
3997        if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
3998                goto err_destroy;
3999
4000        /*
4001         * wq_pool_mutex protects global freeze state and workqueues list.
4002         * Grab it, adjust max_active and add the new @wq to workqueues
4003         * list.
4004         */
4005        mutex_lock(&wq_pool_mutex);
4006
4007        mutex_lock(&wq->mutex);
4008        for_each_pwq(pwq, wq)
4009                pwq_adjust_max_active(pwq);
4010        mutex_unlock(&wq->mutex);
4011
4012        list_add_tail_rcu(&wq->list, &workqueues);
4013
4014        mutex_unlock(&wq_pool_mutex);
4015
4016        return wq;
4017
4018err_free_wq:
4019        free_workqueue_attrs(wq->unbound_attrs);
4020        kfree(wq);
4021        return NULL;
4022err_destroy:
4023        destroy_workqueue(wq);
4024        return NULL;
4025}
4026EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
4027
4028/**
4029 * destroy_workqueue - safely terminate a workqueue
4030 * @wq: target workqueue
4031 *
4032 * Safely destroy a workqueue. All work currently pending will be done first.
4033 */
4034void destroy_workqueue(struct workqueue_struct *wq)
4035{
4036        struct pool_workqueue *pwq;
4037        int node;
4038
4039        /* drain it before proceeding with destruction */
4040        drain_workqueue(wq);
4041
4042        /* sanity checks */
4043        mutex_lock(&wq->mutex);
4044        for_each_pwq(pwq, wq) {
4045                int i;
4046
4047                for (i = 0; i < WORK_NR_COLORS; i++) {
4048                        if (WARN_ON(pwq->nr_in_flight[i])) {
4049                                mutex_unlock(&wq->mutex);
4050                                return;
4051                        }
4052                }
4053
4054                if (WARN_ON((pwq != wq->dfl_pwq) && (pwq->refcnt > 1)) ||
4055                    WARN_ON(pwq->nr_active) ||
4056                    WARN_ON(!list_empty(&pwq->delayed_works))) {
4057                        mutex_unlock(&wq->mutex);
4058                        return;
4059                }
4060        }
4061        mutex_unlock(&wq->mutex);
4062
4063        /*
4064         * wq list is used to freeze wq, remove from list after
4065         * flushing is complete in case freeze races us.
4066         */
4067        mutex_lock(&wq_pool_mutex);
4068        list_del_rcu(&wq->list);
4069        mutex_unlock(&wq_pool_mutex);
4070
4071        workqueue_sysfs_unregister(wq);
4072
4073        if (wq->rescuer)
4074                kthread_stop(wq->rescuer->task);
4075
4076        if (!(wq->flags & WQ_UNBOUND)) {
4077                /*
4078                 * The base ref is never dropped on per-cpu pwqs.  Directly
4079                 * schedule RCU free.
4080                 */
4081                call_rcu_sched(&wq->rcu, rcu_free_wq);
4082        } else {
4083                /*
4084                 * We're the sole accessor of @wq at this point.  Directly
4085                 * access numa_pwq_tbl[] and dfl_pwq to put the base refs.
4086                 * @wq will be freed when the last pwq is released.
4087                 */
4088                for_each_node(node) {
4089                        pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
4090                        RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL);
4091                        put_pwq_unlocked(pwq);
4092                }
4093
4094                /*
4095                 * Put dfl_pwq.  @wq may be freed any time after dfl_pwq is
4096                 * put.  Don't access it afterwards.
4097                 */
4098                pwq = wq->dfl_pwq;
4099                wq->dfl_pwq = NULL;
4100                put_pwq_unlocked(pwq);
4101        }
4102}
4103EXPORT_SYMBOL_GPL(destroy_workqueue);
4104
4105/**
4106 * workqueue_set_max_active - adjust max_active of a workqueue
4107 * @wq: target workqueue
4108 * @max_active: new max_active value.
4109 *
4110 * Set max_active of @wq to @max_active.
4111 *
4112 * CONTEXT:
4113 * Don't call from IRQ context.
4114 */
4115void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
4116{
4117        struct pool_workqueue *pwq;
4118
4119        /* disallow meddling with max_active for ordered workqueues */
4120        if (WARN_ON(wq->flags & __WQ_ORDERED))
4121                return;
4122
4123        max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
4124
4125        mutex_lock(&wq->mutex);
4126
4127        wq->saved_max_active = max_active;
4128
4129        for_each_pwq(pwq, wq)
4130                pwq_adjust_max_active(pwq);
4131
4132        mutex_unlock(&wq->mutex);
4133}
4134EXPORT_SYMBOL_GPL(workqueue_set_max_active);
4135
4136/**
4137 * current_is_workqueue_rescuer - is %current workqueue rescuer?
4138 *
4139 * Determine whether %current is a workqueue rescuer.  Can be used from
4140 * work functions to determine whether it's being run off the rescuer task.
4141 *
4142 * Return: %true if %current is a workqueue rescuer. %false otherwise.
4143 */
4144bool current_is_workqueue_rescuer(void)
4145{
4146        struct worker *worker = current_wq_worker();
4147
4148        return worker && worker->rescue_wq;
4149}
4150
4151/**
4152 * workqueue_congested - test whether a workqueue is congested
4153 * @cpu: CPU in question
4154 * @wq: target workqueue
4155 *
4156 * Test whether @wq's cpu workqueue for @cpu is congested.  There is
4157 * no synchronization around this function and the test result is
4158 * unreliable and only useful as advisory hints or for debugging.
4159 *
4160 * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
4161 * Note that both per-cpu and unbound workqueues may be associated with
4162 * multiple pool_workqueues which have separate congested states.  A
4163 * workqueue being congested on one CPU doesn't mean the workqueue is also
4164 * contested on other CPUs / NUMA nodes.
4165 *
4166 * Return:
4167 * %true if congested, %false otherwise.
4168 */
4169bool workqueue_congested(int cpu, struct workqueue_struct *wq)
4170{
4171        struct pool_workqueue *pwq;
4172        bool ret;
4173
4174        rcu_read_lock_sched();
4175
4176        if (cpu == WORK_CPU_UNBOUND)
4177                cpu = smp_processor_id();
4178
4179        if (!(wq->flags & WQ_UNBOUND))
4180                pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
4181        else
4182                pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
4183
4184        ret = !list_empty(&pwq->delayed_works);
4185        rcu_read_unlock_sched();
4186
4187        return ret;
4188}
4189EXPORT_SYMBOL_GPL(workqueue_congested);
4190
4191/**
4192 * work_busy - test whether a work is currently pending or running
4193 * @work: the work to be tested
4194 *
4195 * Test whether @work is currently pending or running.  There is no
4196 * synchronization around this function and the test result is
4197 * unreliable and only useful as advisory hints or for debugging.
4198 *
4199 * Return:
4200 * OR'd bitmask of WORK_BUSY_* bits.
4201 */
4202unsigned int work_busy(struct work_struct *work)
4203{
4204        struct worker_pool *pool;
4205        unsigned long flags;
4206        unsigned int ret = 0;
4207
4208        if (work_pending(work))
4209                ret |= WORK_BUSY_PENDING;
4210
4211        local_irq_save(flags);
4212        pool = get_work_pool(work);
4213        if (pool) {
4214                spin_lock(&pool->lock);
4215                if (find_worker_executing_work(pool, work))
4216                        ret |= WORK_BUSY_RUNNING;
4217                spin_unlock(&pool->lock);
4218        }
4219        local_irq_restore(flags);
4220
4221        return ret;
4222}
4223EXPORT_SYMBOL_GPL(work_busy);
4224
4225/**
4226 * set_worker_desc - set description for the current work item
4227 * @fmt: printf-style format string
4228 * @...: arguments for the format string
4229 *
4230 * This function can be called by a running work function to describe what
4231 * the work item is about.  If the worker task gets dumped, this
4232 * information will be printed out together to help debugging.  The
4233 * description can be at most WORKER_DESC_LEN including the trailing '\0'.
4234 */
4235void set_worker_desc(const char *fmt, ...)
4236{
4237        struct worker *worker = current_wq_worker();
4238        va_list args;
4239
4240        if (worker) {
4241                va_start(args, fmt);
4242                vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
4243                va_end(args);
4244                worker->desc_valid = true;
4245        }
4246}
4247
4248/**
4249 * print_worker_info - print out worker information and description
4250 * @log_lvl: the log level to use when printing
4251 * @task: target task
4252 *
4253 * If @task is a worker and currently executing a work item, print out the
4254 * name of the workqueue being serviced and worker description set with
4255 * set_worker_desc() by the currently executing work item.
4256 *
4257 * This function can be safely called on any task as long as the
4258 * task_struct itself is accessible.  While safe, this function isn't
4259 * synchronized and may print out mixups or garbages of limited length.
4260 */
4261void print_worker_info(const char *log_lvl, struct task_struct *task)
4262{
4263        work_func_t *fn = NULL;
4264        char name[WQ_NAME_LEN] = { };
4265        char desc[WORKER_DESC_LEN] = { };
4266        struct pool_workqueue *pwq = NULL;
4267        struct workqueue_struct *wq = NULL;
4268        bool desc_valid = false;
4269        struct worker *worker;
4270
4271        if (!(task->flags & PF_WQ_WORKER))
4272                return;
4273
4274        /*
4275         * This function is called without any synchronization and @task
4276         * could be in any state.  Be careful with dereferences.
4277         */
4278        worker = probe_kthread_data(task);
4279
4280        /*
4281         * Carefully copy the associated workqueue's workfn and name.  Keep
4282         * the original last '\0' in case the original contains garbage.
4283         */
4284        probe_kernel_read(&fn, &worker->current_func, sizeof(fn));
4285        probe_kernel_read(&pwq, &worker->current_pwq, sizeof(pwq));
4286        probe_kernel_read(&wq, &pwq->wq, sizeof(wq));
4287        probe_kernel_read(name, wq->name, sizeof(name) - 1);
4288
4289        /* copy worker description */
4290        probe_kernel_read(&desc_valid, &worker->desc_valid, sizeof(desc_valid));
4291        if (desc_valid)
4292                probe_kernel_read(desc, worker->desc, sizeof(desc) - 1);
4293
4294        if (fn || name[0] || desc[0]) {
4295                printk("%sWorkqueue: %s %pf", log_lvl, name, fn);
4296                if (desc[0])
4297                        pr_cont(" (%s)", desc);
4298                pr_cont("\n");
4299        }
4300}
4301
4302static void pr_cont_pool_info(struct worker_pool *pool)
4303{
4304        pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask);
4305        if (pool->node != NUMA_NO_NODE)
4306                pr_cont(" node=%d", pool->node);
4307        pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice);
4308}
4309
4310static void pr_cont_work(bool comma, struct work_struct *work)
4311{
4312        if (work->func == wq_barrier_func) {
4313                struct wq_barrier *barr;
4314
4315                barr = container_of(work, struct wq_barrier, work);
4316
4317                pr_cont("%s BAR(%d)", comma ? "," : "",
4318                        task_pid_nr(barr->task));
4319        } else {
4320                pr_cont("%s %pf", comma ? "," : "", work->func);
4321        }
4322}
4323
4324static void show_pwq(struct pool_workqueue *pwq)
4325{
4326        struct worker_pool *pool = pwq->pool;
4327        struct work_struct *work;
4328        struct worker *worker;
4329        bool has_in_flight = false, has_pending = false;
4330        int bkt;
4331
4332        pr_info("  pwq %d:", pool->id);
4333        pr_cont_pool_info(pool);
4334
4335        pr_cont(" active=%d/%d%s\n", pwq->nr_active, pwq->max_active,
4336                !list_empty(&pwq->mayday_node) ? " MAYDAY" : "");
4337
4338        hash_for_each(pool->busy_hash, bkt, worker, hentry) {
4339                if (worker->current_pwq == pwq) {
4340                        has_in_flight = true;
4341                        break;
4342                }
4343        }
4344        if (has_in_flight) {
4345                bool comma = false;
4346
4347                pr_info("    in-flight:");
4348                hash_for_each(pool->busy_hash, bkt, worker, hentry) {
4349                        if (worker->current_pwq != pwq)
4350                                continue;
4351
4352                        pr_cont("%s %d%s:%pf", comma ? "," : "",
4353                                task_pid_nr(worker->task),
4354                                worker == pwq->wq->rescuer ? "(RESCUER)" : "",
4355                                worker->current_func);
4356                        list_for_each_entry(work, &worker->scheduled, entry)
4357                                pr_cont_work(false, work);
4358                        comma = true;
4359                }
4360                pr_cont("\n");
4361        }
4362
4363        list_for_each_entry(work, &pool->worklist, entry) {
4364                if (get_work_pwq(work) == pwq) {
4365                        has_pending = true;
4366                        break;
4367                }
4368        }
4369        if (has_pending) {
4370                bool comma = false;
4371
4372                pr_info("    pending:");
4373                list_for_each_entry(work, &pool->worklist, entry) {
4374                        if (get_work_pwq(work) != pwq)
4375                                continue;
4376
4377                        pr_cont_work(comma, work);
4378                        comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
4379                }
4380                pr_cont("\n");
4381        }
4382
4383        if (!list_empty(&pwq->delayed_works)) {
4384                bool comma = false;
4385
4386                pr_info("    delayed:");
4387                list_for_each_entry(work, &pwq->delayed_works, entry) {
4388                        pr_cont_work(comma, work);
4389                        comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
4390                }
4391                pr_cont("\n");
4392        }
4393}
4394
4395/**
4396 * show_workqueue_state - dump workqueue state
4397 *
4398 * Called from a sysrq handler and prints out all busy workqueues and
4399 * pools.
4400 */
4401void show_workqueue_state(void)
4402{
4403        struct workqueue_struct *wq;
4404        struct worker_pool *pool;
4405        unsigned long flags;
4406        int pi;
4407
4408        rcu_read_lock_sched();
4409
4410        pr_info("Showing busy workqueues and worker pools:\n");
4411
4412        list_for_each_entry_rcu(wq, &workqueues, list) {
4413                struct pool_workqueue *pwq;
4414                bool idle = true;
4415
4416                for_each_pwq(pwq, wq) {
4417                        if (pwq->nr_active || !list_empty(&pwq->delayed_works)) {
4418                                idle = false;
4419                                break;
4420                        }
4421                }
4422                if (idle)
4423                        continue;
4424
4425                pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
4426
4427                for_each_pwq(pwq, wq) {
4428                        spin_lock_irqsave(&pwq->pool->lock, flags);
4429                        if (pwq->nr_active || !list_empty(&pwq->delayed_works))
4430                                show_pwq(pwq);
4431                        spin_unlock_irqrestore(&pwq->pool->lock, flags);
4432                }
4433        }
4434
4435        for_each_pool(pool, pi) {
4436                struct worker *worker;
4437                bool first = true;
4438
4439                spin_lock_irqsave(&pool->lock, flags);
4440                if (pool->nr_workers == pool->nr_idle)
4441                        goto next_pool;
4442
4443                pr_info("pool %d:", pool->id);
4444                pr_cont_pool_info(pool);
4445                pr_cont(" hung=%us workers=%d",
4446                        jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
4447                        pool->nr_workers);
4448                if (pool->manager)
4449                        pr_cont(" manager: %d",
4450                                task_pid_nr(pool->manager->task));
4451                list_for_each_entry(worker, &pool->idle_list, entry) {
4452                        pr_cont(" %s%d", first ? "idle: " : "",
4453                                task_pid_nr(worker->task));
4454                        first = false;
4455                }
4456                pr_cont("\n");
4457        next_pool:
4458                spin_unlock_irqrestore(&pool->lock, flags);
4459        }
4460
4461        rcu_read_unlock_sched();
4462}
4463
4464/*
4465 * CPU hotplug.
4466 *
4467 * There are two challenges in supporting CPU hotplug.  Firstly, there
4468 * are a lot of assumptions on strong associations among work, pwq and
4469 * pool which make migrating pending and scheduled works very
4470 * difficult to implement without impacting hot paths.  Secondly,
4471 * worker pools serve mix of short, long and very long running works making
4472 * blocked draining impractical.
4473 *
4474 * This is solved by allowing the pools to be disassociated from the CPU
4475 * running as an unbound one and allowing it to be reattached later if the
4476 * cpu comes back online.
4477 */
4478
4479static void wq_unbind_fn(struct work_struct *work)
4480{
4481        int cpu = smp_processor_id();
4482        struct worker_pool *pool;
4483        struct worker *worker;
4484
4485        for_each_cpu_worker_pool(pool, cpu) {
4486                mutex_lock(&pool->attach_mutex);
4487                spin_lock_irq(&pool->lock);
4488
4489                /*
4490                 * We've blocked all attach/detach operations. Make all workers
4491                 * unbound and set DISASSOCIATED.  Before this, all workers
4492                 * except for the ones which are still executing works from
4493                 * before the last CPU down must be on the cpu.  After
4494                 * this, they may become diasporas.
4495                 */
4496                for_each_pool_worker(worker, pool)
4497                        worker->flags |= WORKER_UNBOUND;
4498
4499                pool->flags |= POOL_DISASSOCIATED;
4500
4501                spin_unlock_irq(&pool->lock);
4502                mutex_unlock(&pool->attach_mutex);
4503
4504                /*
4505                 * Call schedule() so that we cross rq->lock and thus can
4506                 * guarantee sched callbacks see the %WORKER_UNBOUND flag.
4507                 * This is necessary as scheduler callbacks may be invoked
4508                 * from other cpus.
4509                 */
4510                schedule();
4511
4512                /*
4513                 * Sched callbacks are disabled now.  Zap nr_running.
4514                 * After this, nr_running stays zero and need_more_worker()
4515                 * and keep_working() are always true as long as the
4516                 * worklist is not empty.  This pool now behaves as an
4517                 * unbound (in terms of concurrency management) pool which
4518                 * are served by workers tied to the pool.
4519                 */
4520                atomic_set(&pool->nr_running, 0);
4521
4522                /*
4523                 * With concurrency management just turned off, a busy
4524                 * worker blocking could lead to lengthy stalls.  Kick off
4525                 * unbound chain execution of currently pending work items.
4526                 */
4527                spin_lock_irq(&pool->lock);
4528                wake_up_worker(pool);
4529                spin_unlock_irq(&pool->lock);
4530        }
4531}
4532
4533/**
4534 * rebind_workers - rebind all workers of a pool to the associated CPU
4535 * @pool: pool of interest
4536 *
4537 * @pool->cpu is coming online.  Rebind all workers to the CPU.
4538 */
4539static void rebind_workers(struct worker_pool *pool)
4540{
4541        struct worker *worker;
4542
4543        lockdep_assert_held(&pool->attach_mutex);
4544
4545        /*
4546         * Restore CPU affinity of all workers.  As all idle workers should
4547         * be on the run-queue of the associated CPU before any local
4548         * wake-ups for concurrency management happen, restore CPU affinity
4549         * of all workers first and then clear UNBOUND.  As we're called
4550         * from CPU_ONLINE, the following shouldn't fail.
4551         */
4552        for_each_pool_worker(worker, pool)
4553                WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
4554                                                  pool->attrs->cpumask) < 0);
4555
4556        spin_lock_irq(&pool->lock);
4557
4558        /*
4559         * XXX: CPU hotplug notifiers are weird and can call DOWN_FAILED
4560         * w/o preceding DOWN_PREPARE.  Work around it.  CPU hotplug is
4561         * being reworked and this can go away in time.
4562         */
4563        if (!(pool->flags & POOL_DISASSOCIATED)) {
4564                spin_unlock_irq(&pool->lock);
4565                return;
4566        }
4567
4568        pool->flags &= ~POOL_DISASSOCIATED;
4569
4570        for_each_pool_worker(worker, pool) {
4571                unsigned int worker_flags = worker->flags;
4572
4573                /*
4574                 * A bound idle worker should actually be on the runqueue
4575                 * of the associated CPU for local wake-ups targeting it to
4576                 * work.  Kick all idle workers so that they migrate to the
4577                 * associated CPU.  Doing this in the same loop as
4578                 * replacing UNBOUND with REBOUND is safe as no worker will
4579                 * be bound before @pool->lock is released.
4580                 */
4581                if (worker_flags & WORKER_IDLE)
4582                        wake_up_process(worker->task);
4583
4584                /*
4585                 * We want to clear UNBOUND but can't directly call
4586                 * worker_clr_flags() or adjust nr_running.  Atomically
4587                 * replace UNBOUND with another NOT_RUNNING flag REBOUND.
4588                 * @worker will clear REBOUND using worker_clr_flags() when
4589                 * it initiates the next execution cycle thus restoring
4590                 * concurrency management.  Note that when or whether
4591                 * @worker clears REBOUND doesn't affect correctness.
4592                 *
4593                 * ACCESS_ONCE() is necessary because @worker->flags may be
4594                 * tested without holding any lock in
4595                 * wq_worker_waking_up().  Without it, NOT_RUNNING test may
4596                 * fail incorrectly leading to premature concurrency
4597                 * management operations.
4598                 */
4599                WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
4600                worker_flags |= WORKER_REBOUND;
4601                worker_flags &= ~WORKER_UNBOUND;
4602                ACCESS_ONCE(worker->flags) = worker_flags;
4603        }
4604
4605        spin_unlock_irq(&pool->lock);
4606}
4607
4608/**
4609 * restore_unbound_workers_cpumask - restore cpumask of unbound workers
4610 * @pool: unbound pool of interest
4611 * @cpu: the CPU which is coming up
4612 *
4613 * An unbound pool may end up with a cpumask which doesn't have any online
4614 * CPUs.  When a worker of such pool get scheduled, the scheduler resets
4615 * its cpus_allowed.  If @cpu is in @pool's cpumask which didn't have any
4616 * online CPU before, cpus_allowed of all its workers should be restored.
4617 */
4618static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
4619{
4620        static cpumask_t cpumask;
4621        struct worker *worker;
4622
4623        lockdep_assert_held(&pool->attach_mutex);
4624
4625        /* is @cpu allowed for @pool? */
4626        if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
4627                return;
4628
4629        /* is @cpu the only online CPU? */
4630        cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
4631        if (cpumask_weight(&cpumask) != 1)
4632                return;
4633
4634        /* as we're called from CPU_ONLINE, the following shouldn't fail */
4635        for_each_pool_worker(worker, pool)
4636                WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
4637                                                  pool->attrs->cpumask) < 0);
4638}
4639
4640/*
4641 * Workqueues should be brought up before normal priority CPU notifiers.
4642 * This will be registered high priority CPU notifier.
4643 */
4644static int workqueue_cpu_up_callback(struct notifier_block *nfb,
4645                                               unsigned long action,
4646                                               void *hcpu)
4647{
4648        int cpu = (unsigned long)hcpu;
4649        struct worker_pool *pool;
4650        struct workqueue_struct *wq;
4651        int pi;
4652
4653        switch (action & ~CPU_TASKS_FROZEN) {
4654        case CPU_UP_PREPARE:
4655                for_each_cpu_worker_pool(pool, cpu) {
4656                        if (pool->nr_workers)
4657                                continue;
4658                        if (!create_worker(pool))
4659                                return NOTIFY_BAD;
4660                }
4661                break;
4662
4663        case CPU_DOWN_FAILED:
4664        case CPU_ONLINE:
4665                mutex_lock(&wq_pool_mutex);
4666
4667                for_each_pool(pool, pi) {
4668                        mutex_lock(&pool->attach_mutex);
4669
4670                        if (pool->cpu == cpu)
4671                                rebind_workers(pool);
4672                        else if (pool->cpu < 0)
4673                                restore_unbound_workers_cpumask(pool, cpu);
4674
4675                        mutex_unlock(&pool->attach_mutex);
4676                }
4677
4678                /* update NUMA affinity of unbound workqueues */
4679                list_for_each_entry(wq, &workqueues, list)
4680                        wq_update_unbound_numa(wq, cpu, true);
4681
4682                mutex_unlock(&wq_pool_mutex);
4683                break;
4684        }
4685        return NOTIFY_OK;
4686}
4687
4688/*
4689 * Workqueues should be brought down after normal priority CPU notifiers.
4690 * This will be registered as low priority CPU notifier.
4691 */
4692static int workqueue_cpu_down_callback(struct notifier_block *nfb,
4693                                                 unsigned long action,
4694                                                 void *hcpu)
4695{
4696        int cpu = (unsigned long)hcpu;
4697        struct work_struct unbind_work;
4698        struct workqueue_struct *wq;
4699
4700        switch (action & ~CPU_TASKS_FROZEN) {
4701        case CPU_DOWN_PREPARE:
4702                /* unbinding per-cpu workers should happen on the local CPU */
4703                INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
4704                queue_work_on(cpu, system_highpri_wq, &unbind_work);
4705
4706                /* update NUMA affinity of unbound workqueues */
4707                mutex_lock(&wq_pool_mutex);
4708                list_for_each_entry(wq, &workqueues, list)
4709                        wq_update_unbound_numa(wq, cpu, false);
4710                mutex_unlock(&wq_pool_mutex);
4711
4712                /* wait for per-cpu unbinding to finish */
4713                flush_work(&unbind_work);
4714                destroy_work_on_stack(&unbind_work);
4715                break;
4716        }
4717        return NOTIFY_OK;
4718}
4719
4720#ifdef CONFIG_SMP
4721
4722struct work_for_cpu {
4723        struct work_struct work;
4724        long (*fn)(void *);
4725        void *arg;
4726        long ret;
4727};
4728
4729static void work_for_cpu_fn(struct work_struct *work)
4730{
4731        struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
4732
4733        wfc->ret = wfc->fn(wfc->arg);
4734}
4735
4736/**
4737 * work_on_cpu - run a function in thread context on a particular cpu
4738 * @cpu: the cpu to run on
4739 * @fn: the function to run
4740 * @arg: the function arg
4741 *
4742 * It is up to the caller to ensure that the cpu doesn't go offline.
4743 * The caller must not hold any locks which would prevent @fn from completing.
4744 *
4745 * Return: The value @fn returns.
4746 */
4747long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
4748{
4749        struct work_for_cpu wfc = { .fn = fn, .arg = arg };
4750
4751        INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
4752        schedule_work_on(cpu, &wfc.work);
4753        flush_work(&wfc.work);
4754        destroy_work_on_stack(&wfc.work);
4755        return wfc.ret;
4756}
4757EXPORT_SYMBOL_GPL(work_on_cpu);
4758#endif /* CONFIG_SMP */
4759
4760#ifdef CONFIG_FREEZER
4761
4762/**
4763 * freeze_workqueues_begin - begin freezing workqueues
4764 *
4765 * Start freezing workqueues.  After this function returns, all freezable
4766 * workqueues will queue new works to their delayed_works list instead of
4767 * pool->worklist.
4768 *
4769 * CONTEXT:
4770 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
4771 */
4772void freeze_workqueues_begin(void)
4773{
4774        struct workqueue_struct *wq;
4775        struct pool_workqueue *pwq;
4776
4777        mutex_lock(&wq_pool_mutex);
4778
4779        WARN_ON_ONCE(workqueue_freezing);
4780        workqueue_freezing = true;
4781
4782        list_for_each_entry(wq, &workqueues, list) {
4783                mutex_lock(&wq->mutex);
4784                for_each_pwq(pwq, wq)
4785                        pwq_adjust_max_active(pwq);
4786                mutex_unlock(&wq->mutex);
4787        }
4788
4789        mutex_unlock(&wq_pool_mutex);
4790}
4791
4792/**
4793 * freeze_workqueues_busy - are freezable workqueues still busy?
4794 *
4795 * Check whether freezing is complete.  This function must be called
4796 * between freeze_workqueues_begin() and thaw_workqueues().
4797 *
4798 * CONTEXT:
4799 * Grabs and releases wq_pool_mutex.
4800 *
4801 * Return:
4802 * %true if some freezable workqueues are still busy.  %false if freezing
4803 * is complete.
4804 */
4805bool freeze_workqueues_busy(void)
4806{
4807        bool busy = false;
4808        struct workqueue_struct *wq;
4809        struct pool_workqueue *pwq;
4810
4811        mutex_lock(&wq_pool_mutex);
4812
4813        WARN_ON_ONCE(!workqueue_freezing);
4814
4815        list_for_each_entry(wq, &workqueues, list) {
4816                if (!(wq->flags & WQ_FREEZABLE))
4817                        continue;
4818                /*
4819                 * nr_active is monotonically decreasing.  It's safe
4820                 * to peek without lock.
4821                 */
4822                rcu_read_lock_sched();
4823                for_each_pwq(pwq, wq) {
4824                        WARN_ON_ONCE(pwq->nr_active < 0);
4825                        if (pwq->nr_active) {
4826                                busy = true;
4827                                rcu_read_unlock_sched();
4828                                goto out_unlock;
4829                        }
4830                }
4831                rcu_read_unlock_sched();
4832        }
4833out_unlock:
4834        mutex_unlock(&wq_pool_mutex);
4835        return busy;
4836}
4837
4838/**
4839 * thaw_workqueues - thaw workqueues
4840 *
4841 * Thaw workqueues.  Normal queueing is restored and all collected
4842 * frozen works are transferred to their respective pool worklists.
4843 *
4844 * CONTEXT:
4845 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
4846 */
4847void thaw_workqueues(void)
4848{
4849        struct workqueue_struct *wq;
4850        struct pool_workqueue *pwq;
4851
4852        mutex_lock(&wq_pool_mutex);
4853
4854        if (!workqueue_freezing)
4855                goto out_unlock;
4856
4857        workqueue_freezing = false;
4858
4859        /* restore max_active and repopulate worklist */
4860        list_for_each_entry(wq, &workqueues, list) {
4861                mutex_lock(&wq->mutex);
4862                for_each_pwq(pwq, wq)
4863                        pwq_adjust_max_active(pwq);
4864                mutex_unlock(&wq->mutex);
4865        }
4866
4867out_unlock:
4868        mutex_unlock(&wq_pool_mutex);
4869}
4870#endif /* CONFIG_FREEZER */
4871
4872static int workqueue_apply_unbound_cpumask(void)
4873{
4874        LIST_HEAD(ctxs);
4875        int ret = 0;
4876        struct workqueue_struct *wq;
4877        struct apply_wqattrs_ctx *ctx, *n;
4878
4879        lockdep_assert_held(&wq_pool_mutex);
4880
4881        list_for_each_entry(wq, &workqueues, list) {
4882                if (!(wq->flags & WQ_UNBOUND))
4883                        continue;
4884                /* creating multiple pwqs breaks ordering guarantee */
4885                if (wq->flags & __WQ_ORDERED)
4886                        continue;
4887
4888                ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs);
4889                if (!ctx) {
4890                        ret = -ENOMEM;
4891                        break;
4892                }
4893
4894                list_add_tail(&ctx->list, &ctxs);
4895        }
4896
4897        list_for_each_entry_safe(ctx, n, &ctxs, list) {
4898                if (!ret)
4899                        apply_wqattrs_commit(ctx);
4900                apply_wqattrs_cleanup(ctx);
4901        }
4902
4903        return ret;
4904}
4905
4906/**
4907 *  workqueue_set_unbound_cpumask - Set the low-level unbound cpumask
4908 *  @cpumask: the cpumask to set
4909 *
4910 *  The low-level workqueues cpumask is a global cpumask that limits
4911 *  the affinity of all unbound workqueues.  This function check the @cpumask
4912 *  and apply it to all unbound workqueues and updates all pwqs of them.
4913 *
4914 *  Retun:      0       - Success
4915 *              -EINVAL - Invalid @cpumask
4916 *              -ENOMEM - Failed to allocate memory for attrs or pwqs.
4917 */
4918int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
4919{
4920        int ret = -EINVAL;
4921        cpumask_var_t saved_cpumask;
4922
4923        if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL))
4924                return -ENOMEM;
4925
4926        cpumask_and(cpumask, cpumask, cpu_possible_mask);
4927        if (!cpumask_empty(cpumask)) {
4928                apply_wqattrs_lock();
4929
4930                /* save the old wq_unbound_cpumask. */
4931                cpumask_copy(saved_cpumask, wq_unbound_cpumask);
4932
4933                /* update wq_unbound_cpumask at first and apply it to wqs. */
4934                cpumask_copy(wq_unbound_cpumask, cpumask);
4935                ret = workqueue_apply_unbound_cpumask();
4936
4937                /* restore the wq_unbound_cpumask when failed. */
4938                if (ret < 0)
4939                        cpumask_copy(wq_unbound_cpumask, saved_cpumask);
4940
4941                apply_wqattrs_unlock();
4942        }
4943
4944        free_cpumask_var(saved_cpumask);
4945        return ret;
4946}
4947
4948#ifdef CONFIG_SYSFS
4949/*
4950 * Workqueues with WQ_SYSFS flag set is visible to userland via
4951 * /sys/bus/workqueue/devices/WQ_NAME.  All visible workqueues have the
4952 * following attributes.
4953 *
4954 *  per_cpu     RO bool : whether the workqueue is per-cpu or unbound
4955 *  max_active  RW int  : maximum number of in-flight work items
4956 *
4957 * Unbound workqueues have the following extra attributes.
4958 *
4959 *  id          RO int  : the associated pool ID
4960 *  nice        RW int  : nice value of the workers
4961 *  cpumask     RW mask : bitmask of allowed CPUs for the workers
4962 */
4963struct wq_device {
4964        struct workqueue_struct         *wq;
4965        struct device                   dev;
4966};
4967
4968static struct workqueue_struct *dev_to_wq(struct device *dev)
4969{
4970        struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
4971
4972        return wq_dev->wq;
4973}
4974
4975static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr,
4976                            char *buf)
4977{
4978        struct workqueue_struct *wq = dev_to_wq(dev);
4979
4980        return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
4981}
4982static DEVICE_ATTR_RO(per_cpu);
4983
4984static ssize_t max_active_show(struct device *dev,
4985                               struct device_attribute *attr, char *buf)
4986{
4987        struct workqueue_struct *wq = dev_to_wq(dev);
4988
4989        return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
4990}
4991
4992static ssize_t max_active_store(struct device *dev,
4993                                struct device_attribute *attr, const char *buf,
4994                                size_t count)
4995{
4996        struct workqueue_struct *wq = dev_to_wq(dev);
4997        int val;
4998
4999        if (sscanf(buf, "%d", &val) != 1 || val <= 0)
5000                return -EINVAL;
5001
5002        workqueue_set_max_active(wq, val);
5003        return count;
5004}
5005static DEVICE_ATTR_RW(max_active);
5006
5007static struct attribute *wq_sysfs_attrs[] = {
5008        &dev_attr_per_cpu.attr,
5009        &dev_attr_max_active.attr,
5010        NULL,
5011};
5012ATTRIBUTE_GROUPS(wq_sysfs);
5013
5014static ssize_t wq_pool_ids_show(struct device *dev,
5015                                struct device_attribute *attr, char *buf)
5016{
5017        struct workqueue_struct *wq = dev_to_wq(dev);
5018        const char *delim = "";
5019        int node, written = 0;
5020
5021        rcu_read_lock_sched();
5022        for_each_node(node) {
5023                written += scnprintf(buf + written, PAGE_SIZE - written,
5024                                     "%s%d:%d", delim, node,
5025                                     unbound_pwq_by_node(wq, node)->pool->id);
5026                delim = " ";
5027        }
5028        written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
5029        rcu_read_unlock_sched();
5030
5031        return written;
5032}
5033
5034static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
5035                            char *buf)
5036{
5037        struct workqueue_struct *wq = dev_to_wq(dev);
5038        int written;
5039
5040        mutex_lock(&wq->mutex);
5041        written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice);
5042        mutex_unlock(&wq->mutex);
5043
5044        return written;
5045}
5046
5047/* prepare workqueue_attrs for sysfs store operations */
5048static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
5049{
5050        struct workqueue_attrs *attrs;
5051
5052        lockdep_assert_held(&wq_pool_mutex);
5053
5054        attrs = alloc_workqueue_attrs(GFP_KERNEL);
5055        if (!attrs)
5056                return NULL;
5057
5058        copy_workqueue_attrs(attrs, wq->unbound_attrs);
5059        return attrs;
5060}
5061
5062static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
5063                             const char *buf, size_t count)
5064{
5065        struct workqueue_struct *wq = dev_to_wq(dev);
5066        struct workqueue_attrs *attrs;
5067        int ret = -ENOMEM;
5068
5069        apply_wqattrs_lock();
5070
5071        attrs = wq_sysfs_prep_attrs(wq);
5072        if (!attrs)
5073                goto out_unlock;
5074
5075        if (sscanf(buf, "%d", &attrs->nice) == 1 &&
5076            attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE)
5077                ret = apply_workqueue_attrs_locked(wq, attrs);
5078        else
5079                ret = -EINVAL;
5080
5081out_unlock:
5082        apply_wqattrs_unlock();
5083        free_workqueue_attrs(attrs);
5084        return ret ?: count;
5085}
5086
5087static ssize_t wq_cpumask_show(struct device *dev,
5088                               struct device_attribute *attr, char *buf)
5089{
5090        struct workqueue_struct *wq = dev_to_wq(dev);
5091        int written;
5092
5093        mutex_lock(&wq->mutex);
5094        written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
5095                            cpumask_pr_args(wq->unbound_attrs->cpumask));
5096        mutex_unlock(&wq->mutex);
5097        return written;
5098}
5099
5100static ssize_t wq_cpumask_store(struct device *dev,
5101                                struct device_attribute *attr,
5102                                const char *buf, size_t count)
5103{
5104        struct workqueue_struct *wq = dev_to_wq(dev);
5105        struct workqueue_attrs *attrs;
5106        int ret = -ENOMEM;
5107
5108        apply_wqattrs_lock();
5109
5110        attrs = wq_sysfs_prep_attrs(wq);
5111        if (!attrs)
5112                goto out_unlock;
5113
5114        ret = cpumask_parse(buf, attrs->cpumask);
5115        if (!ret)
5116                ret = apply_workqueue_attrs_locked(wq, attrs);
5117
5118out_unlock:
5119        apply_wqattrs_unlock();
5120        free_workqueue_attrs(attrs);
5121        return ret ?: count;
5122}
5123
5124static ssize_t wq_numa_show(struct device *dev, struct device_attribute *attr,
5125                            char *buf)
5126{
5127        struct workqueue_struct *wq = dev_to_wq(dev);
5128        int written;
5129
5130        mutex_lock(&wq->mutex);
5131        written = scnprintf(buf, PAGE_SIZE, "%d\n",
5132                            !wq->unbound_attrs->no_numa);
5133        mutex_unlock(&wq->mutex);
5134
5135        return written;
5136}
5137
5138static ssize_t wq_numa_store(struct device *dev, struct device_attribute *attr,
5139                             const char *buf, size_t count)
5140{
5141        struct workqueue_struct *wq = dev_to_wq(dev);
5142        struct workqueue_attrs *attrs;
5143        int v, ret = -ENOMEM;
5144
5145        apply_wqattrs_lock();
5146
5147        attrs = wq_sysfs_prep_attrs(wq);
5148        if (!attrs)
5149                goto out_unlock;
5150
5151        ret = -EINVAL;
5152        if (sscanf(buf, "%d", &v) == 1) {
5153                attrs->no_numa = !v;
5154                ret = apply_workqueue_attrs_locked(wq, attrs);
5155        }
5156
5157out_unlock:
5158        apply_wqattrs_unlock();
5159        free_workqueue_attrs(attrs);
5160        return ret ?: count;
5161}
5162
5163static struct device_attribute wq_sysfs_unbound_attrs[] = {
5164        __ATTR(pool_ids, 0444, wq_pool_ids_show, NULL),
5165        __ATTR(nice, 0644, wq_nice_show, wq_nice_store),
5166        __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
5167        __ATTR(numa, 0644, wq_numa_show, wq_numa_store),
5168        __ATTR_NULL,
5169};
5170
5171static struct bus_type wq_subsys = {
5172        .name                           = "workqueue",
5173        .dev_groups                     = wq_sysfs_groups,
5174};
5175
5176static ssize_t wq_unbound_cpumask_show(struct device *dev,
5177                struct device_attribute *attr, char *buf)
5178{
5179        int written;
5180
5181        mutex_lock(&wq_pool_mutex);
5182        written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
5183                            cpumask_pr_args(wq_unbound_cpumask));
5184        mutex_unlock(&wq_pool_mutex);
5185
5186        return written;
5187}
5188
5189static ssize_t wq_unbound_cpumask_store(struct device *dev,
5190                struct device_attribute *attr, const char *buf, size_t count)
5191{
5192        cpumask_var_t cpumask;
5193        int ret;
5194
5195        if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
5196                return -ENOMEM;
5197
5198        ret = cpumask_parse(buf, cpumask);
5199        if (!ret)
5200                ret = workqueue_set_unbound_cpumask(cpumask);
5201
5202        free_cpumask_var(cpumask);
5203        return ret ? ret : count;
5204}
5205
5206static struct device_attribute wq_sysfs_cpumask_attr =
5207        __ATTR(cpumask, 0644, wq_unbound_cpumask_show,
5208               wq_unbound_cpumask_store);
5209
5210static int __init wq_sysfs_init(void)
5211{
5212        int err;
5213
5214        err = subsys_virtual_register(&wq_subsys, NULL);
5215        if (err)
5216                return err;
5217
5218        return device_create_file(wq_subsys.dev_root, &wq_sysfs_cpumask_attr);
5219}
5220core_initcall(wq_sysfs_init);
5221
5222static void wq_device_release(struct device *dev)
5223{
5224        struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
5225
5226        kfree(wq_dev);
5227}
5228
5229/**
5230 * workqueue_sysfs_register - make a workqueue visible in sysfs
5231 * @wq: the workqueue to register
5232 *
5233 * Expose @wq in sysfs under /sys/bus/workqueue/devices.
5234 * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set
5235 * which is the preferred method.
5236 *
5237 * Workqueue user should use this function directly iff it wants to apply
5238 * workqueue_attrs before making the workqueue visible in sysfs; otherwise,
5239 * apply_workqueue_attrs() may race against userland updating the
5240 * attributes.
5241 *
5242 * Return: 0 on success, -errno on failure.
5243 */
5244int workqueue_sysfs_register(struct workqueue_struct *wq)
5245{
5246        struct wq_device *wq_dev;
5247        int ret;
5248
5249        /*
5250         * Adjusting max_active or creating new pwqs by applying
5251         * attributes breaks ordering guarantee.  Disallow exposing ordered
5252         * workqueues.
5253         */
5254        if (WARN_ON(wq->flags & __WQ_ORDERED))
5255                return -EINVAL;
5256
5257        wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
5258        if (!wq_dev)
5259                return -ENOMEM;
5260
5261        wq_dev->wq = wq;
5262        wq_dev->dev.bus = &wq_subsys;
5263        wq_dev->dev.release = wq_device_release;
5264        dev_set_name(&wq_dev->dev, "%s", wq->name);
5265
5266        /*
5267         * unbound_attrs are created separately.  Suppress uevent until
5268         * everything is ready.
5269         */
5270        dev_set_uevent_suppress(&wq_dev->dev, true);
5271
5272        ret = device_register(&wq_dev->dev);
5273        if (ret) {
5274                kfree(wq_dev);
5275                wq->wq_dev = NULL;
5276                return ret;
5277        }
5278
5279        if (wq->flags & WQ_UNBOUND) {
5280                struct device_attribute *attr;
5281
5282                for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) {
5283                        ret = device_create_file(&wq_dev->dev, attr);
5284                        if (ret) {
5285                                device_unregister(&wq_dev->dev);
5286                                wq->wq_dev = NULL;
5287                                return ret;
5288                        }
5289                }
5290        }
5291
5292        dev_set_uevent_suppress(&wq_dev->dev, false);
5293        kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
5294        return 0;
5295}
5296
5297/**
5298 * workqueue_sysfs_unregister - undo workqueue_sysfs_register()
5299 * @wq: the workqueue to unregister
5300 *
5301 * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister.
5302 */
5303static void workqueue_sysfs_unregister(struct workqueue_struct *wq)
5304{
5305        struct wq_device *wq_dev = wq->wq_dev;
5306
5307        if (!wq->wq_dev)
5308                return;
5309
5310        wq->wq_dev = NULL;
5311        device_unregister(&wq_dev->dev);
5312}
5313#else   /* CONFIG_SYSFS */
5314static void workqueue_sysfs_unregister(struct workqueue_struct *wq)     { }
5315#endif  /* CONFIG_SYSFS */
5316
5317/*
5318 * Workqueue watchdog.
5319 *
5320 * Stall may be caused by various bugs - missing WQ_MEM_RECLAIM, illegal
5321 * flush dependency, a concurrency managed work item which stays RUNNING
5322 * indefinitely.  Workqueue stalls can be very difficult to debug as the
5323 * usual warning mechanisms don't trigger and internal workqueue state is
5324 * largely opaque.
5325 *
5326 * Workqueue watchdog monitors all worker pools periodically and dumps
5327 * state if some pools failed to make forward progress for a while where
5328 * forward progress is defined as the first item on ->worklist changing.
5329 *
5330 * This mechanism is controlled through the kernel parameter
5331 * "workqueue.watchdog_thresh" which can be updated at runtime through the
5332 * corresponding sysfs parameter file.
5333 */
5334#ifdef CONFIG_WQ_WATCHDOG
5335
5336static void wq_watchdog_timer_fn(unsigned long data);
5337
5338static unsigned long wq_watchdog_thresh = 30;
5339static struct timer_list wq_watchdog_timer =
5340        TIMER_DEFERRED_INITIALIZER(wq_watchdog_timer_fn, 0, 0);
5341
5342static unsigned long wq_watchdog_touched = INITIAL_JIFFIES;
5343static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;
5344
5345static void wq_watchdog_reset_touched(void)
5346{
5347        int cpu;
5348
5349        wq_watchdog_touched = jiffies;
5350        for_each_possible_cpu(cpu)
5351                per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
5352}
5353
5354static void wq_watchdog_timer_fn(unsigned long data)
5355{
5356        unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
5357        bool lockup_detected = false;
5358        struct worker_pool *pool;
5359        int pi;
5360
5361        if (!thresh)
5362                return;
5363
5364        rcu_read_lock();
5365
5366        for_each_pool(pool, pi) {
5367                unsigned long pool_ts, touched, ts;
5368
5369                if (list_empty(&pool->worklist))
5370                        continue;
5371
5372                /* get the latest of pool and touched timestamps */
5373                pool_ts = READ_ONCE(pool->watchdog_ts);
5374                touched = READ_ONCE(wq_watchdog_touched);
5375
5376                if (time_after(pool_ts, touched))
5377                        ts = pool_ts;
5378                else
5379                        ts = touched;
5380
5381                if (pool->cpu >= 0) {
5382                        unsigned long cpu_touched =
5383                                READ_ONCE(per_cpu(wq_watchdog_touched_cpu,
5384                                                  pool->cpu));
5385                        if (time_after(cpu_touched, ts))
5386                                ts = cpu_touched;
5387                }
5388
5389                /* did we stall? */
5390                if (time_after(jiffies, ts + thresh)) {
5391                        lockup_detected = true;
5392                        pr_emerg("BUG: workqueue lockup - pool");
5393                        pr_cont_pool_info(pool);
5394                        pr_cont(" stuck for %us!\n",
5395                                jiffies_to_msecs(jiffies - pool_ts) / 1000);
5396                }
5397        }
5398
5399        rcu_read_unlock();
5400
5401        if (lockup_detected)
5402                show_workqueue_state();
5403
5404        wq_watchdog_reset_touched();
5405        mod_timer(&wq_watchdog_timer, jiffies + thresh);
5406}
5407
5408void wq_watchdog_touch(int cpu)
5409{
5410        if (cpu >= 0)
5411                per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
5412        else
5413                wq_watchdog_touched = jiffies;
5414}
5415
5416static void wq_watchdog_set_thresh(unsigned long thresh)
5417{
5418        wq_watchdog_thresh = 0;
5419        del_timer_sync(&wq_watchdog_timer);
5420
5421        if (thresh) {
5422                wq_watchdog_thresh = thresh;
5423                wq_watchdog_reset_touched();
5424                mod_timer(&wq_watchdog_timer, jiffies + thresh * HZ);
5425        }
5426}
5427
5428static int wq_watchdog_param_set_thresh(const char *val,
5429                                        const struct kernel_param *kp)
5430{
5431        unsigned long thresh;
5432        int ret;
5433
5434        ret = kstrtoul(val, 0, &thresh);
5435        if (ret)
5436                return ret;
5437
5438        if (system_wq)
5439                wq_watchdog_set_thresh(thresh);
5440        else
5441                wq_watchdog_thresh = thresh;
5442
5443        return 0;
5444}
5445
5446static const struct kernel_param_ops wq_watchdog_thresh_ops = {
5447        .set    = wq_watchdog_param_set_thresh,
5448        .get    = param_get_ulong,
5449};
5450
5451module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh,
5452                0644);
5453
5454static void wq_watchdog_init(void)
5455{
5456        wq_watchdog_set_thresh(wq_watchdog_thresh);
5457}
5458
5459#else   /* CONFIG_WQ_WATCHDOG */
5460
5461static inline void wq_watchdog_init(void) { }
5462
5463#endif  /* CONFIG_WQ_WATCHDOG */
5464
5465static void __init wq_numa_init(void)
5466{
5467        cpumask_var_t *tbl;
5468        int node, cpu;
5469
5470        if (num_possible_nodes() <= 1)
5471                return;
5472
5473        if (wq_disable_numa) {
5474                pr_info("workqueue: NUMA affinity support disabled\n");
5475                return;
5476        }
5477
5478        wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs(GFP_KERNEL);
5479        BUG_ON(!wq_update_unbound_numa_attrs_buf);
5480
5481        /*
5482         * We want masks of possible CPUs of each node which isn't readily
5483         * available.  Build one from cpu_to_node() which should have been
5484         * fully initialized by now.
5485         */
5486        tbl = kzalloc(nr_node_ids * sizeof(tbl[0]), GFP_KERNEL);
5487        BUG_ON(!tbl);
5488
5489        for_each_node(node)
5490                BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
5491                                node_online(node) ? node : NUMA_NO_NODE));
5492
5493        for_each_possible_cpu(cpu) {
5494                node = cpu_to_node(cpu);
5495                if (WARN_ON(node == NUMA_NO_NODE)) {
5496                        pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
5497                        /* happens iff arch is bonkers, let's just proceed */
5498                        return;
5499                }
5500                cpumask_set_cpu(cpu, tbl[node]);
5501        }
5502
5503        wq_numa_possible_cpumask = tbl;
5504        wq_numa_enabled = true;
5505}
5506
5507static int __init init_workqueues(void)
5508{
5509        int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
5510        int i, cpu;
5511
5512        WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
5513
5514        BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
5515        cpumask_copy(wq_unbound_cpumask, cpu_possible_mask);
5516
5517        pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
5518
5519        cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
5520        hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
5521
5522        wq_numa_init();
5523
5524        /* initialize CPU pools */
5525        for_each_possible_cpu(cpu) {
5526                struct worker_pool *pool;
5527
5528                i = 0;
5529                for_each_cpu_worker_pool(pool, cpu) {
5530                        BUG_ON(init_worker_pool(pool));
5531                        pool->cpu = cpu;
5532                        cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
5533                        pool->attrs->nice = std_nice[i++];
5534                        pool->node = cpu_to_node(cpu);
5535
5536                        /* alloc pool ID */
5537                        mutex_lock(&wq_pool_mutex);
5538                        BUG_ON(worker_pool_assign_id(pool));
5539                        mutex_unlock(&wq_pool_mutex);
5540                }
5541        }
5542
5543        /* create the initial worker */
5544        for_each_online_cpu(cpu) {
5545                struct worker_pool *pool;
5546
5547                for_each_cpu_worker_pool(pool, cpu) {
5548                        pool->flags &= ~POOL_DISASSOCIATED;
5549                        BUG_ON(!create_worker(pool));
5550                }
5551        }
5552
5553        /* create default unbound and ordered wq attrs */
5554        for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
5555                struct workqueue_attrs *attrs;
5556
5557                BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
5558                attrs->nice = std_nice[i];
5559                unbound_std_wq_attrs[i] = attrs;
5560
5561                /*
5562                 * An ordered wq should have only one pwq as ordering is
5563                 * guaranteed by max_active which is enforced by pwqs.
5564                 * Turn off NUMA so that dfl_pwq is used for all nodes.
5565                 */
5566                BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
5567                attrs->nice = std_nice[i];
5568                attrs->no_numa = true;
5569                ordered_wq_attrs[i] = attrs;
5570        }
5571
5572        system_wq = alloc_workqueue("events", 0, 0);
5573        system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
5574        system_long_wq = alloc_workqueue("events_long", 0, 0);
5575        system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
5576                                            WQ_UNBOUND_MAX_ACTIVE);
5577        system_freezable_wq = alloc_workqueue("events_freezable",
5578                                              WQ_FREEZABLE, 0);
5579        system_power_efficient_wq = alloc_workqueue("events_power_efficient",
5580                                              WQ_POWER_EFFICIENT, 0);
5581        system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
5582                                              WQ_FREEZABLE | WQ_POWER_EFFICIENT,
5583                                              0);
5584        BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
5585               !system_unbound_wq || !system_freezable_wq ||
5586               !system_power_efficient_wq ||
5587               !system_freezable_power_efficient_wq);
5588
5589        wq_watchdog_init();
5590
5591        return 0;
5592}
5593early_initcall(init_workqueues);
5594