linux/kernel/workqueue.c
<<
>>
Prefs
   1/*
   2 * kernel/workqueue.c - generic async execution with shared worker pool
   3 *
   4 * Copyright (C) 2002           Ingo Molnar
   5 *
   6 *   Derived from the taskqueue/keventd code by:
   7 *     David Woodhouse <dwmw2@infradead.org>
   8 *     Andrew Morton
   9 *     Kai Petzke <wpp@marie.physik.tu-berlin.de>
  10 *     Theodore Ts'o <tytso@mit.edu>
  11 *
  12 * Made to use alloc_percpu by Christoph Lameter.
  13 *
  14 * Copyright (C) 2010           SUSE Linux Products GmbH
  15 * Copyright (C) 2010           Tejun Heo <tj@kernel.org>
  16 *
  17 * This is the generic async execution mechanism.  Work items as are
  18 * executed in process context.  The worker pool is shared and
  19 * automatically managed.  There are two worker pools for each CPU (one for
  20 * normal work items and the other for high priority ones) and some extra
  21 * pools for workqueues which are not bound to any specific CPU - the
  22 * number of these backing pools is dynamic.
  23 *
  24 * Please read Documentation/workqueue.txt for details.
  25 */
  26
  27#include <linux/export.h>
  28#include <linux/kernel.h>
  29#include <linux/sched.h>
  30#include <linux/init.h>
  31#include <linux/signal.h>
  32#include <linux/completion.h>
  33#include <linux/workqueue.h>
  34#include <linux/slab.h>
  35#include <linux/cpu.h>
  36#include <linux/notifier.h>
  37#include <linux/kthread.h>
  38#include <linux/hardirq.h>
  39#include <linux/mempolicy.h>
  40#include <linux/freezer.h>
  41#include <linux/kallsyms.h>
  42#include <linux/debug_locks.h>
  43#include <linux/lockdep.h>
  44#include <linux/idr.h>
  45#include <linux/jhash.h>
  46#include <linux/hashtable.h>
  47#include <linux/rculist.h>
  48#include <linux/nodemask.h>
  49#include <linux/moduleparam.h>
  50#include <linux/uaccess.h>
  51
  52#include "workqueue_internal.h"
  53
  54enum {
  55        /*
  56         * worker_pool flags
  57         *
  58         * A bound pool is either associated or disassociated with its CPU.
  59         * While associated (!DISASSOCIATED), all workers are bound to the
  60         * CPU and none has %WORKER_UNBOUND set and concurrency management
  61         * is in effect.
  62         *
  63         * While DISASSOCIATED, the cpu may be offline and all workers have
  64         * %WORKER_UNBOUND set and concurrency management disabled, and may
  65         * be executing on any CPU.  The pool behaves as an unbound one.
  66         *
  67         * Note that DISASSOCIATED should be flipped only while holding
  68         * attach_mutex to avoid changing binding state while
  69         * worker_attach_to_pool() is in progress.
  70         */
  71        POOL_DISASSOCIATED      = 1 << 2,       /* cpu can't serve workers */
  72
  73        /* worker flags */
  74        WORKER_DIE              = 1 << 1,       /* die die die */
  75        WORKER_IDLE             = 1 << 2,       /* is idle */
  76        WORKER_PREP             = 1 << 3,       /* preparing to run works */
  77        WORKER_CPU_INTENSIVE    = 1 << 6,       /* cpu intensive */
  78        WORKER_UNBOUND          = 1 << 7,       /* worker is unbound */
  79        WORKER_REBOUND          = 1 << 8,       /* worker was rebound */
  80
  81        WORKER_NOT_RUNNING      = WORKER_PREP | WORKER_CPU_INTENSIVE |
  82                                  WORKER_UNBOUND | WORKER_REBOUND,
  83
  84        NR_STD_WORKER_POOLS     = 2,            /* # standard pools per cpu */
  85
  86        UNBOUND_POOL_HASH_ORDER = 6,            /* hashed by pool->attrs */
  87        BUSY_WORKER_HASH_ORDER  = 6,            /* 64 pointers */
  88
  89        MAX_IDLE_WORKERS_RATIO  = 4,            /* 1/4 of busy can be idle */
  90        IDLE_WORKER_TIMEOUT     = 300 * HZ,     /* keep idle ones for 5 mins */
  91
  92        MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
  93                                                /* call for help after 10ms
  94                                                   (min two ticks) */
  95        MAYDAY_INTERVAL         = HZ / 10,      /* and then every 100ms */
  96        CREATE_COOLDOWN         = HZ,           /* time to breath after fail */
  97
  98        /*
  99         * Rescue workers are used only on emergencies and shared by
 100         * all cpus.  Give MIN_NICE.
 101         */
 102        RESCUER_NICE_LEVEL      = MIN_NICE,
 103        HIGHPRI_NICE_LEVEL      = MIN_NICE,
 104
 105        WQ_NAME_LEN             = 24,
 106};
 107
 108/*
 109 * Structure fields follow one of the following exclusion rules.
 110 *
 111 * I: Modifiable by initialization/destruction paths and read-only for
 112 *    everyone else.
 113 *
 114 * P: Preemption protected.  Disabling preemption is enough and should
 115 *    only be modified and accessed from the local cpu.
 116 *
 117 * L: pool->lock protected.  Access with pool->lock held.
 118 *
 119 * X: During normal operation, modification requires pool->lock and should
 120 *    be done only from local cpu.  Either disabling preemption on local
 121 *    cpu or grabbing pool->lock is enough for read access.  If
 122 *    POOL_DISASSOCIATED is set, it's identical to L.
 123 *
 124 * A: pool->attach_mutex protected.
 125 *
 126 * PL: wq_pool_mutex protected.
 127 *
 128 * PR: wq_pool_mutex protected for writes.  Sched-RCU protected for reads.
 129 *
 130 * PW: wq_pool_mutex and wq->mutex protected for writes.  Either for reads.
 131 *
 132 * PWR: wq_pool_mutex and wq->mutex protected for writes.  Either or
 133 *      sched-RCU for reads.
 134 *
 135 * WQ: wq->mutex protected.
 136 *
 137 * WR: wq->mutex protected for writes.  Sched-RCU protected for reads.
 138 *
 139 * MD: wq_mayday_lock protected.
 140 */
 141
 142/* struct worker is defined in workqueue_internal.h */
 143
 144struct worker_pool {
 145        spinlock_t              lock;           /* the pool lock */
 146        int                     cpu;            /* I: the associated cpu */
 147        int                     node;           /* I: the associated node ID */
 148        int                     id;             /* I: pool ID */
 149        unsigned int            flags;          /* X: flags */
 150
 151        unsigned long           watchdog_ts;    /* L: watchdog timestamp */
 152
 153        struct list_head        worklist;       /* L: list of pending works */
 154        int                     nr_workers;     /* L: total number of workers */
 155
 156        /* nr_idle includes the ones off idle_list for rebinding */
 157        int                     nr_idle;        /* L: currently idle ones */
 158
 159        struct list_head        idle_list;      /* X: list of idle workers */
 160        struct timer_list       idle_timer;     /* L: worker idle timeout */
 161        struct timer_list       mayday_timer;   /* L: SOS timer for workers */
 162
 163        /* a workers is either on busy_hash or idle_list, or the manager */
 164        DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
 165                                                /* L: hash of busy workers */
 166
 167        /* see manage_workers() for details on the two manager mutexes */
 168        struct mutex            manager_arb;    /* manager arbitration */
 169        struct worker           *manager;       /* L: purely informational */
 170        struct mutex            attach_mutex;   /* attach/detach exclusion */
 171        struct list_head        workers;        /* A: attached workers */
 172        struct completion       *detach_completion; /* all workers detached */
 173
 174        struct ida              worker_ida;     /* worker IDs for task name */
 175
 176        struct workqueue_attrs  *attrs;         /* I: worker attributes */
 177        struct hlist_node       hash_node;      /* PL: unbound_pool_hash node */
 178        int                     refcnt;         /* PL: refcnt for unbound pools */
 179
 180        /*
 181         * The current concurrency level.  As it's likely to be accessed
 182         * from other CPUs during try_to_wake_up(), put it in a separate
 183         * cacheline.
 184         */
 185        atomic_t                nr_running ____cacheline_aligned_in_smp;
 186
 187        /*
 188         * Destruction of pool is sched-RCU protected to allow dereferences
 189         * from get_work_pool().
 190         */
 191        struct rcu_head         rcu;
 192} ____cacheline_aligned_in_smp;
 193
 194/*
 195 * The per-pool workqueue.  While queued, the lower WORK_STRUCT_FLAG_BITS
 196 * of work_struct->data are used for flags and the remaining high bits
 197 * point to the pwq; thus, pwqs need to be aligned at two's power of the
 198 * number of flag bits.
 199 */
 200struct pool_workqueue {
 201        struct worker_pool      *pool;          /* I: the associated pool */
 202        struct workqueue_struct *wq;            /* I: the owning workqueue */
 203        int                     work_color;     /* L: current color */
 204        int                     flush_color;    /* L: flushing color */
 205        int                     refcnt;         /* L: reference count */
 206        int                     nr_in_flight[WORK_NR_COLORS];
 207                                                /* L: nr of in_flight works */
 208        int                     nr_active;      /* L: nr of active works */
 209        int                     max_active;     /* L: max active works */
 210        struct list_head        delayed_works;  /* L: delayed works */
 211        struct list_head        pwqs_node;      /* WR: node on wq->pwqs */
 212        struct list_head        mayday_node;    /* MD: node on wq->maydays */
 213
 214        /*
 215         * Release of unbound pwq is punted to system_wq.  See put_pwq()
 216         * and pwq_unbound_release_workfn() for details.  pool_workqueue
 217         * itself is also sched-RCU protected so that the first pwq can be
 218         * determined without grabbing wq->mutex.
 219         */
 220        struct work_struct      unbound_release_work;
 221        struct rcu_head         rcu;
 222} __aligned(1 << WORK_STRUCT_FLAG_BITS);
 223
 224/*
 225 * Structure used to wait for workqueue flush.
 226 */
 227struct wq_flusher {
 228        struct list_head        list;           /* WQ: list of flushers */
 229        int                     flush_color;    /* WQ: flush color waiting for */
 230        struct completion       done;           /* flush completion */
 231};
 232
 233struct wq_device;
 234
 235/*
 236 * The externally visible workqueue.  It relays the issued work items to
 237 * the appropriate worker_pool through its pool_workqueues.
 238 */
 239struct workqueue_struct {
 240        struct list_head        pwqs;           /* WR: all pwqs of this wq */
 241        struct list_head        list;           /* PR: list of all workqueues */
 242
 243        struct mutex            mutex;          /* protects this wq */
 244        int                     work_color;     /* WQ: current work color */
 245        int                     flush_color;    /* WQ: current flush color */
 246        atomic_t                nr_pwqs_to_flush; /* flush in progress */
 247        struct wq_flusher       *first_flusher; /* WQ: first flusher */
 248        struct list_head        flusher_queue;  /* WQ: flush waiters */
 249        struct list_head        flusher_overflow; /* WQ: flush overflow list */
 250
 251        struct list_head        maydays;        /* MD: pwqs requesting rescue */
 252        struct worker           *rescuer;       /* I: rescue worker */
 253
 254        int                     nr_drainers;    /* WQ: drain in progress */
 255        int                     saved_max_active; /* WQ: saved pwq max_active */
 256
 257        struct workqueue_attrs  *unbound_attrs; /* PW: only for unbound wqs */
 258        struct pool_workqueue   *dfl_pwq;       /* PW: only for unbound wqs */
 259
 260#ifdef CONFIG_SYSFS
 261        struct wq_device        *wq_dev;        /* I: for sysfs interface */
 262#endif
 263#ifdef CONFIG_LOCKDEP
 264        struct lockdep_map      lockdep_map;
 265#endif
 266        char                    name[WQ_NAME_LEN]; /* I: workqueue name */
 267
 268        /*
 269         * Destruction of workqueue_struct is sched-RCU protected to allow
 270         * walking the workqueues list without grabbing wq_pool_mutex.
 271         * This is used to dump all workqueues from sysrq.
 272         */
 273        struct rcu_head         rcu;
 274
 275        /* hot fields used during command issue, aligned to cacheline */
 276        unsigned int            flags ____cacheline_aligned; /* WQ: WQ_* flags */
 277        struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwqs */
 278        struct pool_workqueue __rcu *numa_pwq_tbl[]; /* PWR: unbound pwqs indexed by node */
 279};
 280
 281static struct kmem_cache *pwq_cache;
 282
 283static cpumask_var_t *wq_numa_possible_cpumask;
 284                                        /* possible CPUs of each node */
 285
 286static bool wq_disable_numa;
 287module_param_named(disable_numa, wq_disable_numa, bool, 0444);
 288
 289/* see the comment above the definition of WQ_POWER_EFFICIENT */
 290static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
 291module_param_named(power_efficient, wq_power_efficient, bool, 0444);
 292
 293static bool wq_online;                  /* can kworkers be created yet? */
 294
 295static bool wq_numa_enabled;            /* unbound NUMA affinity enabled */
 296
 297/* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
 298static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
 299
 300static DEFINE_MUTEX(wq_pool_mutex);     /* protects pools and workqueues list */
 301static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
 302
 303static LIST_HEAD(workqueues);           /* PR: list of all workqueues */
 304static bool workqueue_freezing;         /* PL: have wqs started freezing? */
 305
 306/* PL: allowable cpus for unbound wqs and work items */
 307static cpumask_var_t wq_unbound_cpumask;
 308
 309/* CPU where unbound work was last round robin scheduled from this CPU */
 310static DEFINE_PER_CPU(int, wq_rr_cpu_last);
 311
 312/*
 313 * Local execution of unbound work items is no longer guaranteed.  The
 314 * following always forces round-robin CPU selection on unbound work items
 315 * to uncover usages which depend on it.
 316 */
 317#ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
 318static bool wq_debug_force_rr_cpu = true;
 319#else
 320static bool wq_debug_force_rr_cpu = false;
 321#endif
 322module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);
 323
 324/* the per-cpu worker pools */
 325static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools);
 326
 327static DEFINE_IDR(worker_pool_idr);     /* PR: idr of all pools */
 328
 329/* PL: hash of all unbound pools keyed by pool->attrs */
 330static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
 331
 332/* I: attributes used when instantiating standard unbound pools on demand */
 333static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
 334
 335/* I: attributes used when instantiating ordered pools on demand */
 336static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
 337
 338struct workqueue_struct *system_wq __read_mostly;
 339EXPORT_SYMBOL(system_wq);
 340struct workqueue_struct *system_highpri_wq __read_mostly;
 341EXPORT_SYMBOL_GPL(system_highpri_wq);
 342struct workqueue_struct *system_long_wq __read_mostly;
 343EXPORT_SYMBOL_GPL(system_long_wq);
 344struct workqueue_struct *system_unbound_wq __read_mostly;
 345EXPORT_SYMBOL_GPL(system_unbound_wq);
 346struct workqueue_struct *system_freezable_wq __read_mostly;
 347EXPORT_SYMBOL_GPL(system_freezable_wq);
 348struct workqueue_struct *system_power_efficient_wq __read_mostly;
 349EXPORT_SYMBOL_GPL(system_power_efficient_wq);
 350struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
 351EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
 352
 353static int worker_thread(void *__worker);
 354static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
 355
 356#define CREATE_TRACE_POINTS
 357#include <trace/events/workqueue.h>
 358
 359#define assert_rcu_or_pool_mutex()                                      \
 360        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() &&                 \
 361                         !lockdep_is_held(&wq_pool_mutex),              \
 362                         "sched RCU or wq_pool_mutex should be held")
 363
 364#define assert_rcu_or_wq_mutex(wq)                                      \
 365        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() &&                 \
 366                         !lockdep_is_held(&wq->mutex),                  \
 367                         "sched RCU or wq->mutex should be held")
 368
 369#define assert_rcu_or_wq_mutex_or_pool_mutex(wq)                        \
 370        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() &&                 \
 371                         !lockdep_is_held(&wq->mutex) &&                \
 372                         !lockdep_is_held(&wq_pool_mutex),              \
 373                         "sched RCU, wq->mutex or wq_pool_mutex should be held")
 374
 375#define for_each_cpu_worker_pool(pool, cpu)                             \
 376        for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];               \
 377             (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
 378             (pool)++)
 379
 380/**
 381 * for_each_pool - iterate through all worker_pools in the system
 382 * @pool: iteration cursor
 383 * @pi: integer used for iteration
 384 *
 385 * This must be called either with wq_pool_mutex held or sched RCU read
 386 * locked.  If the pool needs to be used beyond the locking in effect, the
 387 * caller is responsible for guaranteeing that the pool stays online.
 388 *
 389 * The if/else clause exists only for the lockdep assertion and can be
 390 * ignored.
 391 */
 392#define for_each_pool(pool, pi)                                         \
 393        idr_for_each_entry(&worker_pool_idr, pool, pi)                  \
 394                if (({ assert_rcu_or_pool_mutex(); false; })) { }       \
 395                else
 396
 397/**
 398 * for_each_pool_worker - iterate through all workers of a worker_pool
 399 * @worker: iteration cursor
 400 * @pool: worker_pool to iterate workers of
 401 *
 402 * This must be called with @pool->attach_mutex.
 403 *
 404 * The if/else clause exists only for the lockdep assertion and can be
 405 * ignored.
 406 */
 407#define for_each_pool_worker(worker, pool)                              \
 408        list_for_each_entry((worker), &(pool)->workers, node)           \
 409                if (({ lockdep_assert_held(&pool->attach_mutex); false; })) { } \
 410                else
 411
 412/**
 413 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
 414 * @pwq: iteration cursor
 415 * @wq: the target workqueue
 416 *
 417 * This must be called either with wq->mutex held or sched RCU read locked.
 418 * If the pwq needs to be used beyond the locking in effect, the caller is
 419 * responsible for guaranteeing that the pwq stays online.
 420 *
 421 * The if/else clause exists only for the lockdep assertion and can be
 422 * ignored.
 423 */
 424#define for_each_pwq(pwq, wq)                                           \
 425        list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node)          \
 426                if (({ assert_rcu_or_wq_mutex(wq); false; })) { }       \
 427                else
 428
 429#ifdef CONFIG_DEBUG_OBJECTS_WORK
 430
 431static struct debug_obj_descr work_debug_descr;
 432
 433static void *work_debug_hint(void *addr)
 434{
 435        return ((struct work_struct *) addr)->func;
 436}
 437
 438static bool work_is_static_object(void *addr)
 439{
 440        struct work_struct *work = addr;
 441
 442        return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work));
 443}
 444
 445/*
 446 * fixup_init is called when:
 447 * - an active object is initialized
 448 */
 449static bool work_fixup_init(void *addr, enum debug_obj_state state)
 450{
 451        struct work_struct *work = addr;
 452
 453        switch (state) {
 454        case ODEBUG_STATE_ACTIVE:
 455                cancel_work_sync(work);
 456                debug_object_init(work, &work_debug_descr);
 457                return true;
 458        default:
 459                return false;
 460        }
 461}
 462
 463/*
 464 * fixup_free is called when:
 465 * - an active object is freed
 466 */
 467static bool work_fixup_free(void *addr, enum debug_obj_state state)
 468{
 469        struct work_struct *work = addr;
 470
 471        switch (state) {
 472        case ODEBUG_STATE_ACTIVE:
 473                cancel_work_sync(work);
 474                debug_object_free(work, &work_debug_descr);
 475                return true;
 476        default:
 477                return false;
 478        }
 479}
 480
 481static struct debug_obj_descr work_debug_descr = {
 482        .name           = "work_struct",
 483        .debug_hint     = work_debug_hint,
 484        .is_static_object = work_is_static_object,
 485        .fixup_init     = work_fixup_init,
 486        .fixup_free     = work_fixup_free,
 487};
 488
 489static inline void debug_work_activate(struct work_struct *work)
 490{
 491        debug_object_activate(work, &work_debug_descr);
 492}
 493
 494static inline void debug_work_deactivate(struct work_struct *work)
 495{
 496        debug_object_deactivate(work, &work_debug_descr);
 497}
 498
 499void __init_work(struct work_struct *work, int onstack)
 500{
 501        if (onstack)
 502                debug_object_init_on_stack(work, &work_debug_descr);
 503        else
 504                debug_object_init(work, &work_debug_descr);
 505}
 506EXPORT_SYMBOL_GPL(__init_work);
 507
 508void destroy_work_on_stack(struct work_struct *work)
 509{
 510        debug_object_free(work, &work_debug_descr);
 511}
 512EXPORT_SYMBOL_GPL(destroy_work_on_stack);
 513
 514void destroy_delayed_work_on_stack(struct delayed_work *work)
 515{
 516        destroy_timer_on_stack(&work->timer);
 517        debug_object_free(&work->work, &work_debug_descr);
 518}
 519EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);
 520
 521#else
 522static inline void debug_work_activate(struct work_struct *work) { }
 523static inline void debug_work_deactivate(struct work_struct *work) { }
 524#endif
 525
 526/**
 527 * worker_pool_assign_id - allocate ID and assing it to @pool
 528 * @pool: the pool pointer of interest
 529 *
 530 * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
 531 * successfully, -errno on failure.
 532 */
 533static int worker_pool_assign_id(struct worker_pool *pool)
 534{
 535        int ret;
 536
 537        lockdep_assert_held(&wq_pool_mutex);
 538
 539        ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
 540                        GFP_KERNEL);
 541        if (ret >= 0) {
 542                pool->id = ret;
 543                return 0;
 544        }
 545        return ret;
 546}
 547
 548/**
 549 * unbound_pwq_by_node - return the unbound pool_workqueue for the given node
 550 * @wq: the target workqueue
 551 * @node: the node ID
 552 *
 553 * This must be called with any of wq_pool_mutex, wq->mutex or sched RCU
 554 * read locked.
 555 * If the pwq needs to be used beyond the locking in effect, the caller is
 556 * responsible for guaranteeing that the pwq stays online.
 557 *
 558 * Return: The unbound pool_workqueue for @node.
 559 */
 560static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
 561                                                  int node)
 562{
 563        assert_rcu_or_wq_mutex_or_pool_mutex(wq);
 564
 565        /*
 566         * XXX: @node can be NUMA_NO_NODE if CPU goes offline while a
 567         * delayed item is pending.  The plan is to keep CPU -> NODE
 568         * mapping valid and stable across CPU on/offlines.  Once that
 569         * happens, this workaround can be removed.
 570         */
 571        if (unlikely(node == NUMA_NO_NODE))
 572                return wq->dfl_pwq;
 573
 574        return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
 575}
 576
 577static unsigned int work_color_to_flags(int color)
 578{
 579        return color << WORK_STRUCT_COLOR_SHIFT;
 580}
 581
 582static int get_work_color(struct work_struct *work)
 583{
 584        return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
 585                ((1 << WORK_STRUCT_COLOR_BITS) - 1);
 586}
 587
 588static int work_next_color(int color)
 589{
 590        return (color + 1) % WORK_NR_COLORS;
 591}
 592
 593/*
 594 * While queued, %WORK_STRUCT_PWQ is set and non flag bits of a work's data
 595 * contain the pointer to the queued pwq.  Once execution starts, the flag
 596 * is cleared and the high bits contain OFFQ flags and pool ID.
 597 *
 598 * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
 599 * and clear_work_data() can be used to set the pwq, pool or clear
 600 * work->data.  These functions should only be called while the work is
 601 * owned - ie. while the PENDING bit is set.
 602 *
 603 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
 604 * corresponding to a work.  Pool is available once the work has been
 605 * queued anywhere after initialization until it is sync canceled.  pwq is
 606 * available only while the work item is queued.
 607 *
 608 * %WORK_OFFQ_CANCELING is used to mark a work item which is being
 609 * canceled.  While being canceled, a work item may have its PENDING set
 610 * but stay off timer and worklist for arbitrarily long and nobody should
 611 * try to steal the PENDING bit.
 612 */
 613static inline void set_work_data(struct work_struct *work, unsigned long data,
 614                                 unsigned long flags)
 615{
 616        WARN_ON_ONCE(!work_pending(work));
 617        atomic_long_set(&work->data, data | flags | work_static(work));
 618}
 619
 620static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
 621                         unsigned long extra_flags)
 622{
 623        set_work_data(work, (unsigned long)pwq,
 624                      WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
 625}
 626
 627static void set_work_pool_and_keep_pending(struct work_struct *work,
 628                                           int pool_id)
 629{
 630        set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
 631                      WORK_STRUCT_PENDING);
 632}
 633
 634static void set_work_pool_and_clear_pending(struct work_struct *work,
 635                                            int pool_id)
 636{
 637        /*
 638         * The following wmb is paired with the implied mb in
 639         * test_and_set_bit(PENDING) and ensures all updates to @work made
 640         * here are visible to and precede any updates by the next PENDING
 641         * owner.
 642         */
 643        smp_wmb();
 644        set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
 645        /*
 646         * The following mb guarantees that previous clear of a PENDING bit
 647         * will not be reordered with any speculative LOADS or STORES from
 648         * work->current_func, which is executed afterwards.  This possible
 649         * reordering can lead to a missed execution on attempt to qeueue
 650         * the same @work.  E.g. consider this case:
 651         *
 652         *   CPU#0                         CPU#1
 653         *   ----------------------------  --------------------------------
 654         *
 655         * 1  STORE event_indicated
 656         * 2  queue_work_on() {
 657         * 3    test_and_set_bit(PENDING)
 658         * 4 }                             set_..._and_clear_pending() {
 659         * 5                                 set_work_data() # clear bit
 660         * 6                                 smp_mb()
 661         * 7                               work->current_func() {
 662         * 8                                  LOAD event_indicated
 663         *                                 }
 664         *
 665         * Without an explicit full barrier speculative LOAD on line 8 can
 666         * be executed before CPU#0 does STORE on line 1.  If that happens,
 667         * CPU#0 observes the PENDING bit is still set and new execution of
 668         * a @work is not queued in a hope, that CPU#1 will eventually
 669         * finish the queued @work.  Meanwhile CPU#1 does not see
 670         * event_indicated is set, because speculative LOAD was executed
 671         * before actual STORE.
 672         */
 673        smp_mb();
 674}
 675
 676static void clear_work_data(struct work_struct *work)
 677{
 678        smp_wmb();      /* see set_work_pool_and_clear_pending() */
 679        set_work_data(work, WORK_STRUCT_NO_POOL, 0);
 680}
 681
 682static struct pool_workqueue *get_work_pwq(struct work_struct *work)
 683{
 684        unsigned long data = atomic_long_read(&work->data);
 685
 686        if (data & WORK_STRUCT_PWQ)
 687                return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
 688        else
 689                return NULL;
 690}
 691
 692/**
 693 * get_work_pool - return the worker_pool a given work was associated with
 694 * @work: the work item of interest
 695 *
 696 * Pools are created and destroyed under wq_pool_mutex, and allows read
 697 * access under sched-RCU read lock.  As such, this function should be
 698 * called under wq_pool_mutex or with preemption disabled.
 699 *
 700 * All fields of the returned pool are accessible as long as the above
 701 * mentioned locking is in effect.  If the returned pool needs to be used
 702 * beyond the critical section, the caller is responsible for ensuring the
 703 * returned pool is and stays online.
 704 *
 705 * Return: The worker_pool @work was last associated with.  %NULL if none.
 706 */
 707static struct worker_pool *get_work_pool(struct work_struct *work)
 708{
 709        unsigned long data = atomic_long_read(&work->data);
 710        int pool_id;
 711
 712        assert_rcu_or_pool_mutex();
 713
 714        if (data & WORK_STRUCT_PWQ)
 715                return ((struct pool_workqueue *)
 716                        (data & WORK_STRUCT_WQ_DATA_MASK))->pool;
 717
 718        pool_id = data >> WORK_OFFQ_POOL_SHIFT;
 719        if (pool_id == WORK_OFFQ_POOL_NONE)
 720                return NULL;
 721
 722        return idr_find(&worker_pool_idr, pool_id);
 723}
 724
 725/**
 726 * get_work_pool_id - return the worker pool ID a given work is associated with
 727 * @work: the work item of interest
 728 *
 729 * Return: The worker_pool ID @work was last associated with.
 730 * %WORK_OFFQ_POOL_NONE if none.
 731 */
 732static int get_work_pool_id(struct work_struct *work)
 733{
 734        unsigned long data = atomic_long_read(&work->data);
 735
 736        if (data & WORK_STRUCT_PWQ)
 737                return ((struct pool_workqueue *)
 738                        (data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;
 739
 740        return data >> WORK_OFFQ_POOL_SHIFT;
 741}
 742
 743static void mark_work_canceling(struct work_struct *work)
 744{
 745        unsigned long pool_id = get_work_pool_id(work);
 746
 747        pool_id <<= WORK_OFFQ_POOL_SHIFT;
 748        set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
 749}
 750
 751static bool work_is_canceling(struct work_struct *work)
 752{
 753        unsigned long data = atomic_long_read(&work->data);
 754
 755        return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
 756}
 757
 758/*
 759 * Policy functions.  These define the policies on how the global worker
 760 * pools are managed.  Unless noted otherwise, these functions assume that
 761 * they're being called with pool->lock held.
 762 */
 763
 764static bool __need_more_worker(struct worker_pool *pool)
 765{
 766        return !atomic_read(&pool->nr_running);
 767}
 768
 769/*
 770 * Need to wake up a worker?  Called from anything but currently
 771 * running workers.
 772 *
 773 * Note that, because unbound workers never contribute to nr_running, this
 774 * function will always return %true for unbound pools as long as the
 775 * worklist isn't empty.
 776 */
 777static bool need_more_worker(struct worker_pool *pool)
 778{
 779        return !list_empty(&pool->worklist) && __need_more_worker(pool);
 780}
 781
 782/* Can I start working?  Called from busy but !running workers. */
 783static bool may_start_working(struct worker_pool *pool)
 784{
 785        return pool->nr_idle;
 786}
 787
 788/* Do I need to keep working?  Called from currently running workers. */
 789static bool keep_working(struct worker_pool *pool)
 790{
 791        return !list_empty(&pool->worklist) &&
 792                atomic_read(&pool->nr_running) <= 1;
 793}
 794
 795/* Do we need a new worker?  Called from manager. */
 796static bool need_to_create_worker(struct worker_pool *pool)
 797{
 798        return need_more_worker(pool) && !may_start_working(pool);
 799}
 800
 801/* Do we have too many workers and should some go away? */
 802static bool too_many_workers(struct worker_pool *pool)
 803{
 804        bool managing = mutex_is_locked(&pool->manager_arb);
 805        int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
 806        int nr_busy = pool->nr_workers - nr_idle;
 807
 808        return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
 809}
 810
 811/*
 812 * Wake up functions.
 813 */
 814
 815/* Return the first idle worker.  Safe with preemption disabled */
 816static struct worker *first_idle_worker(struct worker_pool *pool)
 817{
 818        if (unlikely(list_empty(&pool->idle_list)))
 819                return NULL;
 820
 821        return list_first_entry(&pool->idle_list, struct worker, entry);
 822}
 823
 824/**
 825 * wake_up_worker - wake up an idle worker
 826 * @pool: worker pool to wake worker from
 827 *
 828 * Wake up the first idle worker of @pool.
 829 *
 830 * CONTEXT:
 831 * spin_lock_irq(pool->lock).
 832 */
 833static void wake_up_worker(struct worker_pool *pool)
 834{
 835        struct worker *worker = first_idle_worker(pool);
 836
 837        if (likely(worker))
 838                wake_up_process(worker->task);
 839}
 840
 841/**
 842 * wq_worker_waking_up - a worker is waking up
 843 * @task: task waking up
 844 * @cpu: CPU @task is waking up to
 845 *
 846 * This function is called during try_to_wake_up() when a worker is
 847 * being awoken.
 848 *
 849 * CONTEXT:
 850 * spin_lock_irq(rq->lock)
 851 */
 852void wq_worker_waking_up(struct task_struct *task, int cpu)
 853{
 854        struct worker *worker = kthread_data(task);
 855
 856        if (!(worker->flags & WORKER_NOT_RUNNING)) {
 857                WARN_ON_ONCE(worker->pool->cpu != cpu);
 858                atomic_inc(&worker->pool->nr_running);
 859        }
 860}
 861
 862/**
 863 * wq_worker_sleeping - a worker is going to sleep
 864 * @task: task going to sleep
 865 *
 866 * This function is called during schedule() when a busy worker is
 867 * going to sleep.  Worker on the same cpu can be woken up by
 868 * returning pointer to its task.
 869 *
 870 * CONTEXT:
 871 * spin_lock_irq(rq->lock)
 872 *
 873 * Return:
 874 * Worker task on @cpu to wake up, %NULL if none.
 875 */
 876struct task_struct *wq_worker_sleeping(struct task_struct *task)
 877{
 878        struct worker *worker = kthread_data(task), *to_wakeup = NULL;
 879        struct worker_pool *pool;
 880
 881        /*
 882         * Rescuers, which may not have all the fields set up like normal
 883         * workers, also reach here, let's not access anything before
 884         * checking NOT_RUNNING.
 885         */
 886        if (worker->flags & WORKER_NOT_RUNNING)
 887                return NULL;
 888
 889        pool = worker->pool;
 890
 891        /* this can only happen on the local cpu */
 892        if (WARN_ON_ONCE(pool->cpu != raw_smp_processor_id()))
 893                return NULL;
 894
 895        /*
 896         * The counterpart of the following dec_and_test, implied mb,
 897         * worklist not empty test sequence is in insert_work().
 898         * Please read comment there.
 899         *
 900         * NOT_RUNNING is clear.  This means that we're bound to and
 901         * running on the local cpu w/ rq lock held and preemption
 902         * disabled, which in turn means that none else could be
 903         * manipulating idle_list, so dereferencing idle_list without pool
 904         * lock is safe.
 905         */
 906        if (atomic_dec_and_test(&pool->nr_running) &&
 907            !list_empty(&pool->worklist))
 908                to_wakeup = first_idle_worker(pool);
 909        return to_wakeup ? to_wakeup->task : NULL;
 910}
 911
 912/**
 913 * worker_set_flags - set worker flags and adjust nr_running accordingly
 914 * @worker: self
 915 * @flags: flags to set
 916 *
 917 * Set @flags in @worker->flags and adjust nr_running accordingly.
 918 *
 919 * CONTEXT:
 920 * spin_lock_irq(pool->lock)
 921 */
 922static inline void worker_set_flags(struct worker *worker, unsigned int flags)
 923{
 924        struct worker_pool *pool = worker->pool;
 925
 926        WARN_ON_ONCE(worker->task != current);
 927
 928        /* If transitioning into NOT_RUNNING, adjust nr_running. */
 929        if ((flags & WORKER_NOT_RUNNING) &&
 930            !(worker->flags & WORKER_NOT_RUNNING)) {
 931                atomic_dec(&pool->nr_running);
 932        }
 933
 934        worker->flags |= flags;
 935}
 936
 937/**
 938 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
 939 * @worker: self
 940 * @flags: flags to clear
 941 *
 942 * Clear @flags in @worker->flags and adjust nr_running accordingly.
 943 *
 944 * CONTEXT:
 945 * spin_lock_irq(pool->lock)
 946 */
 947static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
 948{
 949        struct worker_pool *pool = worker->pool;
 950        unsigned int oflags = worker->flags;
 951
 952        WARN_ON_ONCE(worker->task != current);
 953
 954        worker->flags &= ~flags;
 955
 956        /*
 957         * If transitioning out of NOT_RUNNING, increment nr_running.  Note
 958         * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is mask
 959         * of multiple flags, not a single flag.
 960         */
 961        if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
 962                if (!(worker->flags & WORKER_NOT_RUNNING))
 963                        atomic_inc(&pool->nr_running);
 964}
 965
 966/**
 967 * find_worker_executing_work - find worker which is executing a work
 968 * @pool: pool of interest
 969 * @work: work to find worker for
 970 *
 971 * Find a worker which is executing @work on @pool by searching
 972 * @pool->busy_hash which is keyed by the address of @work.  For a worker
 973 * to match, its current execution should match the address of @work and
 974 * its work function.  This is to avoid unwanted dependency between
 975 * unrelated work executions through a work item being recycled while still
 976 * being executed.
 977 *
 978 * This is a bit tricky.  A work item may be freed once its execution
 979 * starts and nothing prevents the freed area from being recycled for
 980 * another work item.  If the same work item address ends up being reused
 981 * before the original execution finishes, workqueue will identify the
 982 * recycled work item as currently executing and make it wait until the
 983 * current execution finishes, introducing an unwanted dependency.
 984 *
 985 * This function checks the work item address and work function to avoid
 986 * false positives.  Note that this isn't complete as one may construct a
 987 * work function which can introduce dependency onto itself through a
 988 * recycled work item.  Well, if somebody wants to shoot oneself in the
 989 * foot that badly, there's only so much we can do, and if such deadlock
 990 * actually occurs, it should be easy to locate the culprit work function.
 991 *
 992 * CONTEXT:
 993 * spin_lock_irq(pool->lock).
 994 *
 995 * Return:
 996 * Pointer to worker which is executing @work if found, %NULL
 997 * otherwise.
 998 */
 999static struct worker *find_worker_executing_work(struct worker_pool *pool,
1000                                                 struct work_struct *work)
1001{
1002        struct worker *worker;
1003
1004        hash_for_each_possible(pool->busy_hash, worker, hentry,
1005                               (unsigned long)work)
1006                if (worker->current_work == work &&
1007                    worker->current_func == work->func)
1008                        return worker;
1009
1010        return NULL;
1011}
1012
1013/**
1014 * move_linked_works - move linked works to a list
1015 * @work: start of series of works to be scheduled
1016 * @head: target list to append @work to
1017 * @nextp: out parameter for nested worklist walking
1018 *
1019 * Schedule linked works starting from @work to @head.  Work series to
1020 * be scheduled starts at @work and includes any consecutive work with
1021 * WORK_STRUCT_LINKED set in its predecessor.
1022 *
1023 * If @nextp is not NULL, it's updated to point to the next work of
1024 * the last scheduled work.  This allows move_linked_works() to be
1025 * nested inside outer list_for_each_entry_safe().
1026 *
1027 * CONTEXT:
1028 * spin_lock_irq(pool->lock).
1029 */
1030static void move_linked_works(struct work_struct *work, struct list_head *head,
1031                              struct work_struct **nextp)
1032{
1033        struct work_struct *n;
1034
1035        /*
1036         * Linked worklist will always end before the end of the list,
1037         * use NULL for list head.
1038         */
1039        list_for_each_entry_safe_from(work, n, NULL, entry) {
1040                list_move_tail(&work->entry, head);
1041                if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1042                        break;
1043        }
1044
1045        /*
1046         * If we're already inside safe list traversal and have moved
1047         * multiple works to the scheduled queue, the next position
1048         * needs to be updated.
1049         */
1050        if (nextp)
1051                *nextp = n;
1052}
1053
1054/**
1055 * get_pwq - get an extra reference on the specified pool_workqueue
1056 * @pwq: pool_workqueue to get
1057 *
1058 * Obtain an extra reference on @pwq.  The caller should guarantee that
1059 * @pwq has positive refcnt and be holding the matching pool->lock.
1060 */
1061static void get_pwq(struct pool_workqueue *pwq)
1062{
1063        lockdep_assert_held(&pwq->pool->lock);
1064        WARN_ON_ONCE(pwq->refcnt <= 0);
1065        pwq->refcnt++;
1066}
1067
1068/**
1069 * put_pwq - put a pool_workqueue reference
1070 * @pwq: pool_workqueue to put
1071 *
1072 * Drop a reference of @pwq.  If its refcnt reaches zero, schedule its
1073 * destruction.  The caller should be holding the matching pool->lock.
1074 */
1075static void put_pwq(struct pool_workqueue *pwq)
1076{
1077        lockdep_assert_held(&pwq->pool->lock);
1078        if (likely(--pwq->refcnt))
1079                return;
1080        if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND)))
1081                return;
1082        /*
1083         * @pwq can't be released under pool->lock, bounce to
1084         * pwq_unbound_release_workfn().  This never recurses on the same
1085         * pool->lock as this path is taken only for unbound workqueues and
1086         * the release work item is scheduled on a per-cpu workqueue.  To
1087         * avoid lockdep warning, unbound pool->locks are given lockdep
1088         * subclass of 1 in get_unbound_pool().
1089         */
1090        schedule_work(&pwq->unbound_release_work);
1091}
1092
1093/**
1094 * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
1095 * @pwq: pool_workqueue to put (can be %NULL)
1096 *
1097 * put_pwq() with locking.  This function also allows %NULL @pwq.
1098 */
1099static void put_pwq_unlocked(struct pool_workqueue *pwq)
1100{
1101        if (pwq) {
1102                /*
1103                 * As both pwqs and pools are sched-RCU protected, the
1104                 * following lock operations are safe.
1105                 */
1106                spin_lock_irq(&pwq->pool->lock);
1107                put_pwq(pwq);
1108                spin_unlock_irq(&pwq->pool->lock);
1109        }
1110}
1111
1112static void pwq_activate_delayed_work(struct work_struct *work)
1113{
1114        struct pool_workqueue *pwq = get_work_pwq(work);
1115
1116        trace_workqueue_activate_work(work);
1117        if (list_empty(&pwq->pool->worklist))
1118                pwq->pool->watchdog_ts = jiffies;
1119        move_linked_works(work, &pwq->pool->worklist, NULL);
1120        __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
1121        pwq->nr_active++;
1122}
1123
1124static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
1125{
1126        struct work_struct *work = list_first_entry(&pwq->delayed_works,
1127                                                    struct work_struct, entry);
1128
1129        pwq_activate_delayed_work(work);
1130}
1131
1132/**
1133 * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
1134 * @pwq: pwq of interest
1135 * @color: color of work which left the queue
1136 *
1137 * A work either has completed or is removed from pending queue,
1138 * decrement nr_in_flight of its pwq and handle workqueue flushing.
1139 *
1140 * CONTEXT:
1141 * spin_lock_irq(pool->lock).
1142 */
1143static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
1144{
1145        /* uncolored work items don't participate in flushing or nr_active */
1146        if (color == WORK_NO_COLOR)
1147                goto out_put;
1148
1149        pwq->nr_in_flight[color]--;
1150
1151        pwq->nr_active--;
1152        if (!list_empty(&pwq->delayed_works)) {
1153                /* one down, submit a delayed one */
1154                if (pwq->nr_active < pwq->max_active)
1155                        pwq_activate_first_delayed(pwq);
1156        }
1157
1158        /* is flush in progress and are we at the flushing tip? */
1159        if (likely(pwq->flush_color != color))
1160                goto out_put;
1161
1162        /* are there still in-flight works? */
1163        if (pwq->nr_in_flight[color])
1164                goto out_put;
1165
1166        /* this pwq is done, clear flush_color */
1167        pwq->flush_color = -1;
1168
1169        /*
1170         * If this was the last pwq, wake up the first flusher.  It
1171         * will handle the rest.
1172         */
1173        if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
1174                complete(&pwq->wq->first_flusher->done);
1175out_put:
1176        put_pwq(pwq);
1177}
1178
1179/**
1180 * try_to_grab_pending - steal work item from worklist and disable irq
1181 * @work: work item to steal
1182 * @is_dwork: @work is a delayed_work
1183 * @flags: place to store irq state
1184 *
1185 * Try to grab PENDING bit of @work.  This function can handle @work in any
1186 * stable state - idle, on timer or on worklist.
1187 *
1188 * Return:
1189 *  1           if @work was pending and we successfully stole PENDING
1190 *  0           if @work was idle and we claimed PENDING
1191 *  -EAGAIN     if PENDING couldn't be grabbed at the moment, safe to busy-retry
1192 *  -ENOENT     if someone else is canceling @work, this state may persist
1193 *              for arbitrarily long
1194 *
1195 * Note:
1196 * On >= 0 return, the caller owns @work's PENDING bit.  To avoid getting
1197 * interrupted while holding PENDING and @work off queue, irq must be
1198 * disabled on entry.  This, combined with delayed_work->timer being
1199 * irqsafe, ensures that we return -EAGAIN for finite short period of time.
1200 *
1201 * On successful return, >= 0, irq is disabled and the caller is
1202 * responsible for releasing it using local_irq_restore(*@flags).
1203 *
1204 * This function is safe to call from any context including IRQ handler.
1205 */
1206static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
1207                               unsigned long *flags)
1208{
1209        struct worker_pool *pool;
1210        struct pool_workqueue *pwq;
1211
1212        local_irq_save(*flags);
1213
1214        /* try to steal the timer if it exists */
1215        if (is_dwork) {
1216                struct delayed_work *dwork = to_delayed_work(work);
1217
1218                /*
1219                 * dwork->timer is irqsafe.  If del_timer() fails, it's
1220                 * guaranteed that the timer is not queued anywhere and not
1221                 * running on the local CPU.
1222                 */
1223                if (likely(del_timer(&dwork->timer)))
1224                        return 1;
1225        }
1226
1227        /* try to claim PENDING the normal way */
1228        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
1229                return 0;
1230
1231        /*
1232         * The queueing is in progress, or it is already queued. Try to
1233         * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
1234         */
1235        pool = get_work_pool(work);
1236        if (!pool)
1237                goto fail;
1238
1239        spin_lock(&pool->lock);
1240        /*
1241         * work->data is guaranteed to point to pwq only while the work
1242         * item is queued on pwq->wq, and both updating work->data to point
1243         * to pwq on queueing and to pool on dequeueing are done under
1244         * pwq->pool->lock.  This in turn guarantees that, if work->data
1245         * points to pwq which is associated with a locked pool, the work
1246         * item is currently queued on that pool.
1247         */
1248        pwq = get_work_pwq(work);
1249        if (pwq && pwq->pool == pool) {
1250                debug_work_deactivate(work);
1251
1252                /*
1253                 * A delayed work item cannot be grabbed directly because
1254                 * it might have linked NO_COLOR work items which, if left
1255                 * on the delayed_list, will confuse pwq->nr_active
1256                 * management later on and cause stall.  Make sure the work
1257                 * item is activated before grabbing.
1258                 */
1259                if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
1260                        pwq_activate_delayed_work(work);
1261
1262                list_del_init(&work->entry);
1263                pwq_dec_nr_in_flight(pwq, get_work_color(work));
1264
1265                /* work->data points to pwq iff queued, point to pool */
1266                set_work_pool_and_keep_pending(work, pool->id);
1267
1268                spin_unlock(&pool->lock);
1269                return 1;
1270        }
1271        spin_unlock(&pool->lock);
1272fail:
1273        local_irq_restore(*flags);
1274        if (work_is_canceling(work))
1275                return -ENOENT;
1276        cpu_relax();
1277        return -EAGAIN;
1278}
1279
1280/**
1281 * insert_work - insert a work into a pool
1282 * @pwq: pwq @work belongs to
1283 * @work: work to insert
1284 * @head: insertion point
1285 * @extra_flags: extra WORK_STRUCT_* flags to set
1286 *
1287 * Insert @work which belongs to @pwq after @head.  @extra_flags is or'd to
1288 * work_struct flags.
1289 *
1290 * CONTEXT:
1291 * spin_lock_irq(pool->lock).
1292 */
1293static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
1294                        struct list_head *head, unsigned int extra_flags)
1295{
1296        struct worker_pool *pool = pwq->pool;
1297
1298        /* we own @work, set data and link */
1299        set_work_pwq(work, pwq, extra_flags);
1300        list_add_tail(&work->entry, head);
1301        get_pwq(pwq);
1302
1303        /*
1304         * Ensure either wq_worker_sleeping() sees the above
1305         * list_add_tail() or we see zero nr_running to avoid workers lying
1306         * around lazily while there are works to be processed.
1307         */
1308        smp_mb();
1309
1310        if (__need_more_worker(pool))
1311                wake_up_worker(pool);
1312}
1313
1314/*
1315 * Test whether @work is being queued from another work executing on the
1316 * same workqueue.
1317 */
1318static bool is_chained_work(struct workqueue_struct *wq)
1319{
1320        struct worker *worker;
1321
1322        worker = current_wq_worker();
1323        /*
1324         * Return %true iff I'm a worker execuing a work item on @wq.  If
1325         * I'm @worker, it's safe to dereference it without locking.
1326         */
1327        return worker && worker->current_pwq->wq == wq;
1328}
1329
1330/*
1331 * When queueing an unbound work item to a wq, prefer local CPU if allowed
1332 * by wq_unbound_cpumask.  Otherwise, round robin among the allowed ones to
1333 * avoid perturbing sensitive tasks.
1334 */
1335static int wq_select_unbound_cpu(int cpu)
1336{
1337        static bool printed_dbg_warning;
1338        int new_cpu;
1339
1340        if (likely(!wq_debug_force_rr_cpu)) {
1341                if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
1342                        return cpu;
1343        } else if (!printed_dbg_warning) {
1344                pr_warn("workqueue: round-robin CPU selection forced, expect performance impact\n");
1345                printed_dbg_warning = true;
1346        }
1347
1348        if (cpumask_empty(wq_unbound_cpumask))
1349                return cpu;
1350
1351        new_cpu = __this_cpu_read(wq_rr_cpu_last);
1352        new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
1353        if (unlikely(new_cpu >= nr_cpu_ids)) {
1354                new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
1355                if (unlikely(new_cpu >= nr_cpu_ids))
1356                        return cpu;
1357        }
1358        __this_cpu_write(wq_rr_cpu_last, new_cpu);
1359
1360        return new_cpu;
1361}
1362
1363static void __queue_work(int cpu, struct workqueue_struct *wq,
1364                         struct work_struct *work)
1365{
1366        struct pool_workqueue *pwq;
1367        struct worker_pool *last_pool;
1368        struct list_head *worklist;
1369        unsigned int work_flags;
1370        unsigned int req_cpu = cpu;
1371
1372        /*
1373         * While a work item is PENDING && off queue, a task trying to
1374         * steal the PENDING will busy-loop waiting for it to either get
1375         * queued or lose PENDING.  Grabbing PENDING and queueing should
1376         * happen with IRQ disabled.
1377         */
1378        WARN_ON_ONCE(!irqs_disabled());
1379
1380        debug_work_activate(work);
1381
1382        /* if draining, only works from the same workqueue are allowed */
1383        if (unlikely(wq->flags & __WQ_DRAINING) &&
1384            WARN_ON_ONCE(!is_chained_work(wq)))
1385                return;
1386retry:
1387        if (req_cpu == WORK_CPU_UNBOUND)
1388                cpu = wq_select_unbound_cpu(raw_smp_processor_id());
1389
1390        /* pwq which will be used unless @work is executing elsewhere */
1391        if (!(wq->flags & WQ_UNBOUND))
1392                pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
1393        else
1394                pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
1395
1396        /*
1397         * If @work was previously on a different pool, it might still be
1398         * running there, in which case the work needs to be queued on that
1399         * pool to guarantee non-reentrancy.
1400         */
1401        last_pool = get_work_pool(work);
1402        if (last_pool && last_pool != pwq->pool) {
1403                struct worker *worker;
1404
1405                spin_lock(&last_pool->lock);
1406
1407                worker = find_worker_executing_work(last_pool, work);
1408
1409                if (worker && worker->current_pwq->wq == wq) {
1410                        pwq = worker->current_pwq;
1411                } else {
1412                        /* meh... not running there, queue here */
1413                        spin_unlock(&last_pool->lock);
1414                        spin_lock(&pwq->pool->lock);
1415                }
1416        } else {
1417                spin_lock(&pwq->pool->lock);
1418        }
1419
1420        /*
1421         * pwq is determined and locked.  For unbound pools, we could have
1422         * raced with pwq release and it could already be dead.  If its
1423         * refcnt is zero, repeat pwq selection.  Note that pwqs never die
1424         * without another pwq replacing it in the numa_pwq_tbl or while
1425         * work items are executing on it, so the retrying is guaranteed to
1426         * make forward-progress.
1427         */
1428        if (unlikely(!pwq->refcnt)) {
1429                if (wq->flags & WQ_UNBOUND) {
1430                        spin_unlock(&pwq->pool->lock);
1431                        cpu_relax();
1432                        goto retry;
1433                }
1434                /* oops */
1435                WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
1436                          wq->name, cpu);
1437        }
1438
1439        /* pwq determined, queue */
1440        trace_workqueue_queue_work(req_cpu, pwq, work);
1441
1442        if (WARN_ON(!list_empty(&work->entry))) {
1443                spin_unlock(&pwq->pool->lock);
1444                return;
1445        }
1446
1447        pwq->nr_in_flight[pwq->work_color]++;
1448        work_flags = work_color_to_flags(pwq->work_color);
1449
1450        if (likely(pwq->nr_active < pwq->max_active)) {
1451                trace_workqueue_activate_work(work);
1452                pwq->nr_active++;
1453                worklist = &pwq->pool->worklist;
1454                if (list_empty(worklist))
1455                        pwq->pool->watchdog_ts = jiffies;
1456        } else {
1457                work_flags |= WORK_STRUCT_DELAYED;
1458                worklist = &pwq->delayed_works;
1459        }
1460
1461        insert_work(pwq, work, worklist, work_flags);
1462
1463        spin_unlock(&pwq->pool->lock);
1464}
1465
1466/**
1467 * queue_work_on - queue work on specific cpu
1468 * @cpu: CPU number to execute work on
1469 * @wq: workqueue to use
1470 * @work: work to queue
1471 *
1472 * We queue the work to a specific CPU, the caller must ensure it
1473 * can't go away.
1474 *
1475 * Return: %false if @work was already on a queue, %true otherwise.
1476 */
1477bool queue_work_on(int cpu, struct workqueue_struct *wq,
1478                   struct work_struct *work)
1479{
1480        bool ret = false;
1481        unsigned long flags;
1482
1483        local_irq_save(flags);
1484
1485        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1486                __queue_work(cpu, wq, work);
1487                ret = true;
1488        }
1489
1490        local_irq_restore(flags);
1491        return ret;
1492}
1493EXPORT_SYMBOL(queue_work_on);
1494
1495void delayed_work_timer_fn(unsigned long __data)
1496{
1497        struct delayed_work *dwork = (struct delayed_work *)__data;
1498
1499        /* should have been called from irqsafe timer with irq already off */
1500        __queue_work(dwork->cpu, dwork->wq, &dwork->work);
1501}
1502EXPORT_SYMBOL(delayed_work_timer_fn);
1503
1504static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
1505                                struct delayed_work *dwork, unsigned long delay)
1506{
1507        struct timer_list *timer = &dwork->timer;
1508        struct work_struct *work = &dwork->work;
1509
1510        WARN_ON_ONCE(!wq);
1511        WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
1512                     timer->data != (unsigned long)dwork);
1513        WARN_ON_ONCE(timer_pending(timer));
1514        WARN_ON_ONCE(!list_empty(&work->entry));
1515
1516        /*
1517         * If @delay is 0, queue @dwork->work immediately.  This is for
1518         * both optimization and correctness.  The earliest @timer can
1519         * expire is on the closest next tick and delayed_work users depend
1520         * on that there's no such delay when @delay is 0.
1521         */
1522        if (!delay) {
1523                __queue_work(cpu, wq, &dwork->work);
1524                return;
1525        }
1526
1527        dwork->wq = wq;
1528        dwork->cpu = cpu;
1529        timer->expires = jiffies + delay;
1530
1531        if (unlikely(cpu != WORK_CPU_UNBOUND))
1532                add_timer_on(timer, cpu);
1533        else
1534                add_timer(timer);
1535}
1536
1537/**
1538 * queue_delayed_work_on - queue work on specific CPU after delay
1539 * @cpu: CPU number to execute work on
1540 * @wq: workqueue to use
1541 * @dwork: work to queue
1542 * @delay: number of jiffies to wait before queueing
1543 *
1544 * Return: %false if @work was already on a queue, %true otherwise.  If
1545 * @delay is zero and @dwork is idle, it will be scheduled for immediate
1546 * execution.
1547 */
1548bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
1549                           struct delayed_work *dwork, unsigned long delay)
1550{
1551        struct work_struct *work = &dwork->work;
1552        bool ret = false;
1553        unsigned long flags;
1554
1555        /* read the comment in __queue_work() */
1556        local_irq_save(flags);
1557
1558        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1559                __queue_delayed_work(cpu, wq, dwork, delay);
1560                ret = true;
1561        }
1562
1563        local_irq_restore(flags);
1564        return ret;
1565}
1566EXPORT_SYMBOL(queue_delayed_work_on);
1567
1568/**
1569 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
1570 * @cpu: CPU number to execute work on
1571 * @wq: workqueue to use
1572 * @dwork: work to queue
1573 * @delay: number of jiffies to wait before queueing
1574 *
1575 * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
1576 * modify @dwork's timer so that it expires after @delay.  If @delay is
1577 * zero, @work is guaranteed to be scheduled immediately regardless of its
1578 * current state.
1579 *
1580 * Return: %false if @dwork was idle and queued, %true if @dwork was
1581 * pending and its timer was modified.
1582 *
1583 * This function is safe to call from any context including IRQ handler.
1584 * See try_to_grab_pending() for details.
1585 */
1586bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
1587                         struct delayed_work *dwork, unsigned long delay)
1588{
1589        unsigned long flags;
1590        int ret;
1591
1592        do {
1593                ret = try_to_grab_pending(&dwork->work, true, &flags);
1594        } while (unlikely(ret == -EAGAIN));
1595
1596        if (likely(ret >= 0)) {
1597                __queue_delayed_work(cpu, wq, dwork, delay);
1598                local_irq_restore(flags);
1599        }
1600
1601        /* -ENOENT from try_to_grab_pending() becomes %true */
1602        return ret;
1603}
1604EXPORT_SYMBOL_GPL(mod_delayed_work_on);
1605
1606/**
1607 * worker_enter_idle - enter idle state
1608 * @worker: worker which is entering idle state
1609 *
1610 * @worker is entering idle state.  Update stats and idle timer if
1611 * necessary.
1612 *
1613 * LOCKING:
1614 * spin_lock_irq(pool->lock).
1615 */
1616static void worker_enter_idle(struct worker *worker)
1617{
1618        struct worker_pool *pool = worker->pool;
1619
1620        if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
1621            WARN_ON_ONCE(!list_empty(&worker->entry) &&
1622                         (worker->hentry.next || worker->hentry.pprev)))
1623                return;
1624
1625        /* can't use worker_set_flags(), also called from create_worker() */
1626        worker->flags |= WORKER_IDLE;
1627        pool->nr_idle++;
1628        worker->last_active = jiffies;
1629
1630        /* idle_list is LIFO */
1631        list_add(&worker->entry, &pool->idle_list);
1632
1633        if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
1634                mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
1635
1636        /*
1637         * Sanity check nr_running.  Because wq_unbind_fn() releases
1638         * pool->lock between setting %WORKER_UNBOUND and zapping
1639         * nr_running, the warning may trigger spuriously.  Check iff
1640         * unbind is not in progress.
1641         */
1642        WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
1643                     pool->nr_workers == pool->nr_idle &&
1644                     atomic_read(&pool->nr_running));
1645}
1646
1647/**
1648 * worker_leave_idle - leave idle state
1649 * @worker: worker which is leaving idle state
1650 *
1651 * @worker is leaving idle state.  Update stats.
1652 *
1653 * LOCKING:
1654 * spin_lock_irq(pool->lock).
1655 */
1656static void worker_leave_idle(struct worker *worker)
1657{
1658        struct worker_pool *pool = worker->pool;
1659
1660        if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
1661                return;
1662        worker_clr_flags(worker, WORKER_IDLE);
1663        pool->nr_idle--;
1664        list_del_init(&worker->entry);
1665}
1666
1667static struct worker *alloc_worker(int node)
1668{
1669        struct worker *worker;
1670
1671        worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
1672        if (worker) {
1673                INIT_LIST_HEAD(&worker->entry);
1674                INIT_LIST_HEAD(&worker->scheduled);
1675                INIT_LIST_HEAD(&worker->node);
1676                /* on creation a worker is in !idle && prep state */
1677                worker->flags = WORKER_PREP;
1678        }
1679        return worker;
1680}
1681
1682/**
1683 * worker_attach_to_pool() - attach a worker to a pool
1684 * @worker: worker to be attached
1685 * @pool: the target pool
1686 *
1687 * Attach @worker to @pool.  Once attached, the %WORKER_UNBOUND flag and
1688 * cpu-binding of @worker are kept coordinated with the pool across
1689 * cpu-[un]hotplugs.
1690 */
1691static void worker_attach_to_pool(struct worker *worker,
1692                                   struct worker_pool *pool)
1693{
1694        mutex_lock(&pool->attach_mutex);
1695
1696        /*
1697         * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
1698         * online CPUs.  It'll be re-applied when any of the CPUs come up.
1699         */
1700        set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
1701
1702        /*
1703         * The pool->attach_mutex ensures %POOL_DISASSOCIATED remains
1704         * stable across this function.  See the comments above the
1705         * flag definition for details.
1706         */
1707        if (pool->flags & POOL_DISASSOCIATED)
1708                worker->flags |= WORKER_UNBOUND;
1709
1710        list_add_tail(&worker->node, &pool->workers);
1711
1712        mutex_unlock(&pool->attach_mutex);
1713}
1714
1715/**
1716 * worker_detach_from_pool() - detach a worker from its pool
1717 * @worker: worker which is attached to its pool
1718 * @pool: the pool @worker is attached to
1719 *
1720 * Undo the attaching which had been done in worker_attach_to_pool().  The
1721 * caller worker shouldn't access to the pool after detached except it has
1722 * other reference to the pool.
1723 */
1724static void worker_detach_from_pool(struct worker *worker,
1725                                    struct worker_pool *pool)
1726{
1727        struct completion *detach_completion = NULL;
1728
1729        mutex_lock(&pool->attach_mutex);
1730        list_del(&worker->node);
1731        if (list_empty(&pool->workers))
1732                detach_completion = pool->detach_completion;
1733        mutex_unlock(&pool->attach_mutex);
1734
1735        /* clear leftover flags without pool->lock after it is detached */
1736        worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
1737
1738        if (detach_completion)
1739                complete(detach_completion);
1740}
1741
1742/**
1743 * create_worker - create a new workqueue worker
1744 * @pool: pool the new worker will belong to
1745 *
1746 * Create and start a new worker which is attached to @pool.
1747 *
1748 * CONTEXT:
1749 * Might sleep.  Does GFP_KERNEL allocations.
1750 *
1751 * Return:
1752 * Pointer to the newly created worker.
1753 */
1754static struct worker *create_worker(struct worker_pool *pool)
1755{
1756        struct worker *worker = NULL;
1757        int id = -1;
1758        char id_buf[16];
1759
1760        /* ID is needed to determine kthread name */
1761        id = ida_simple_get(&pool->worker_ida, 0, 0, GFP_KERNEL);
1762        if (id < 0)
1763                goto fail;
1764
1765        worker = alloc_worker(pool->node);
1766        if (!worker)
1767                goto fail;
1768
1769        worker->pool = pool;
1770        worker->id = id;
1771
1772        if (pool->cpu >= 0)
1773                snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
1774                         pool->attrs->nice < 0  ? "H" : "");
1775        else
1776                snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
1777
1778        worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
1779                                              "kworker/%s", id_buf);
1780        if (IS_ERR(worker->task))
1781                goto fail;
1782
1783        set_user_nice(worker->task, pool->attrs->nice);
1784        kthread_bind_mask(worker->task, pool->attrs->cpumask);
1785
1786        /* successful, attach the worker to the pool */
1787        worker_attach_to_pool(worker, pool);
1788
1789        /* start the newly created worker */
1790        spin_lock_irq(&pool->lock);
1791        worker->pool->nr_workers++;
1792        worker_enter_idle(worker);
1793        wake_up_process(worker->task);
1794        spin_unlock_irq(&pool->lock);
1795
1796        return worker;
1797
1798fail:
1799        if (id >= 0)
1800                ida_simple_remove(&pool->worker_ida, id);
1801        kfree(worker);
1802        return NULL;
1803}
1804
1805/**
1806 * destroy_worker - destroy a workqueue worker
1807 * @worker: worker to be destroyed
1808 *
1809 * Destroy @worker and adjust @pool stats accordingly.  The worker should
1810 * be idle.
1811 *
1812 * CONTEXT:
1813 * spin_lock_irq(pool->lock).
1814 */
1815static void destroy_worker(struct worker *worker)
1816{
1817        struct worker_pool *pool = worker->pool;
1818
1819        lockdep_assert_held(&pool->lock);
1820
1821        /* sanity check frenzy */
1822        if (WARN_ON(worker->current_work) ||
1823            WARN_ON(!list_empty(&worker->scheduled)) ||
1824            WARN_ON(!(worker->flags & WORKER_IDLE)))
1825                return;
1826
1827        pool->nr_workers--;
1828        pool->nr_idle--;
1829
1830        list_del_init(&worker->entry);
1831        worker->flags |= WORKER_DIE;
1832        wake_up_process(worker->task);
1833}
1834
1835static void idle_worker_timeout(unsigned long __pool)
1836{
1837        struct worker_pool *pool = (void *)__pool;
1838
1839        spin_lock_irq(&pool->lock);
1840
1841        while (too_many_workers(pool)) {
1842                struct worker *worker;
1843                unsigned long expires;
1844
1845                /* idle_list is kept in LIFO order, check the last one */
1846                worker = list_entry(pool->idle_list.prev, struct worker, entry);
1847                expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1848
1849                if (time_before(jiffies, expires)) {
1850                        mod_timer(&pool->idle_timer, expires);
1851                        break;
1852                }
1853
1854                destroy_worker(worker);
1855        }
1856
1857        spin_unlock_irq(&pool->lock);
1858}
1859
1860static void send_mayday(struct work_struct *work)
1861{
1862        struct pool_workqueue *pwq = get_work_pwq(work);
1863        struct workqueue_struct *wq = pwq->wq;
1864
1865        lockdep_assert_held(&wq_mayday_lock);
1866
1867        if (!wq->rescuer)
1868                return;
1869
1870        /* mayday mayday mayday */
1871        if (list_empty(&pwq->mayday_node)) {
1872                /*
1873                 * If @pwq is for an unbound wq, its base ref may be put at
1874                 * any time due to an attribute change.  Pin @pwq until the
1875                 * rescuer is done with it.
1876                 */
1877                get_pwq(pwq);
1878                list_add_tail(&pwq->mayday_node, &wq->maydays);
1879                wake_up_process(wq->rescuer->task);
1880        }
1881}
1882
1883static void pool_mayday_timeout(unsigned long __pool)
1884{
1885        struct worker_pool *pool = (void *)__pool;
1886        struct work_struct *work;
1887
1888        spin_lock_irq(&pool->lock);
1889        spin_lock(&wq_mayday_lock);             /* for wq->maydays */
1890
1891        if (need_to_create_worker(pool)) {
1892                /*
1893                 * We've been trying to create a new worker but
1894                 * haven't been successful.  We might be hitting an
1895                 * allocation deadlock.  Send distress signals to
1896                 * rescuers.
1897                 */
1898                list_for_each_entry(work, &pool->worklist, entry)
1899                        send_mayday(work);
1900        }
1901
1902        spin_unlock(&wq_mayday_lock);
1903        spin_unlock_irq(&pool->lock);
1904
1905        mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
1906}
1907
1908/**
1909 * maybe_create_worker - create a new worker if necessary
1910 * @pool: pool to create a new worker for
1911 *
1912 * Create a new worker for @pool if necessary.  @pool is guaranteed to
1913 * have at least one idle worker on return from this function.  If
1914 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
1915 * sent to all rescuers with works scheduled on @pool to resolve
1916 * possible allocation deadlock.
1917 *
1918 * On return, need_to_create_worker() is guaranteed to be %false and
1919 * may_start_working() %true.
1920 *
1921 * LOCKING:
1922 * spin_lock_irq(pool->lock) which may be released and regrabbed
1923 * multiple times.  Does GFP_KERNEL allocations.  Called only from
1924 * manager.
1925 */
1926static void maybe_create_worker(struct worker_pool *pool)
1927__releases(&pool->lock)
1928__acquires(&pool->lock)
1929{
1930restart:
1931        spin_unlock_irq(&pool->lock);
1932
1933        /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
1934        mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
1935
1936        while (true) {
1937                if (create_worker(pool) || !need_to_create_worker(pool))
1938                        break;
1939
1940                schedule_timeout_interruptible(CREATE_COOLDOWN);
1941
1942                if (!need_to_create_worker(pool))
1943                        break;
1944        }
1945
1946        del_timer_sync(&pool->mayday_timer);
1947        spin_lock_irq(&pool->lock);
1948        /*
1949         * This is necessary even after a new worker was just successfully
1950         * created as @pool->lock was dropped and the new worker might have
1951         * already become busy.
1952         */
1953        if (need_to_create_worker(pool))
1954                goto restart;
1955}
1956
1957/**
1958 * manage_workers - manage worker pool
1959 * @worker: self
1960 *
1961 * Assume the manager role and manage the worker pool @worker belongs
1962 * to.  At any given time, there can be only zero or one manager per
1963 * pool.  The exclusion is handled automatically by this function.
1964 *
1965 * The caller can safely start processing works on false return.  On
1966 * true return, it's guaranteed that need_to_create_worker() is false
1967 * and may_start_working() is true.
1968 *
1969 * CONTEXT:
1970 * spin_lock_irq(pool->lock) which may be released and regrabbed
1971 * multiple times.  Does GFP_KERNEL allocations.
1972 *
1973 * Return:
1974 * %false if the pool doesn't need management and the caller can safely
1975 * start processing works, %true if management function was performed and
1976 * the conditions that the caller verified before calling the function may
1977 * no longer be true.
1978 */
1979static bool manage_workers(struct worker *worker)
1980{
1981        struct worker_pool *pool = worker->pool;
1982
1983        /*
1984         * Anyone who successfully grabs manager_arb wins the arbitration
1985         * and becomes the manager.  mutex_trylock() on pool->manager_arb
1986         * failure while holding pool->lock reliably indicates that someone
1987         * else is managing the pool and the worker which failed trylock
1988         * can proceed to executing work items.  This means that anyone
1989         * grabbing manager_arb is responsible for actually performing
1990         * manager duties.  If manager_arb is grabbed and released without
1991         * actual management, the pool may stall indefinitely.
1992         */
1993        if (!mutex_trylock(&pool->manager_arb))
1994                return false;
1995        pool->manager = worker;
1996
1997        maybe_create_worker(pool);
1998
1999        pool->manager = NULL;
2000        mutex_unlock(&pool->manager_arb);
2001        return true;
2002}
2003
2004/**
2005 * process_one_work - process single work
2006 * @worker: self
2007 * @work: work to process
2008 *
2009 * Process @work.  This function contains all the logics necessary to
2010 * process a single work including synchronization against and
2011 * interaction with other workers on the same cpu, queueing and
2012 * flushing.  As long as context requirement is met, any worker can
2013 * call this function to process a work.
2014 *
2015 * CONTEXT:
2016 * spin_lock_irq(pool->lock) which is released and regrabbed.
2017 */
2018static void process_one_work(struct worker *worker, struct work_struct *work)
2019__releases(&pool->lock)
2020__acquires(&pool->lock)
2021{
2022        struct pool_workqueue *pwq = get_work_pwq(work);
2023        struct worker_pool *pool = worker->pool;
2024        bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
2025        int work_color;
2026        struct worker *collision;
2027#ifdef CONFIG_LOCKDEP
2028        /*
2029         * It is permissible to free the struct work_struct from
2030         * inside the function that is called from it, this we need to
2031         * take into account for lockdep too.  To avoid bogus "held
2032         * lock freed" warnings as well as problems when looking into
2033         * work->lockdep_map, make a copy and use that here.
2034         */
2035        struct lockdep_map lockdep_map;
2036
2037        lockdep_copy_map(&lockdep_map, &work->lockdep_map);
2038#endif
2039        /* ensure we're on the correct CPU */
2040        WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
2041                     raw_smp_processor_id() != pool->cpu);
2042
2043        /*
2044         * A single work shouldn't be executed concurrently by
2045         * multiple workers on a single cpu.  Check whether anyone is
2046         * already processing the work.  If so, defer the work to the
2047         * currently executing one.
2048         */
2049        collision = find_worker_executing_work(pool, work);
2050        if (unlikely(collision)) {
2051                move_linked_works(work, &collision->scheduled, NULL);
2052                return;
2053        }
2054
2055        /* claim and dequeue */
2056        debug_work_deactivate(work);
2057        hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
2058        worker->current_work = work;
2059        worker->current_func = work->func;
2060        worker->current_pwq = pwq;
2061        work_color = get_work_color(work);
2062
2063        list_del_init(&work->entry);
2064
2065        /*
2066         * CPU intensive works don't participate in concurrency management.
2067         * They're the scheduler's responsibility.  This takes @worker out
2068         * of concurrency management and the next code block will chain
2069         * execution of the pending work items.
2070         */
2071        if (unlikely(cpu_intensive))
2072                worker_set_flags(worker, WORKER_CPU_INTENSIVE);
2073
2074        /*
2075         * Wake up another worker if necessary.  The condition is always
2076         * false for normal per-cpu workers since nr_running would always
2077         * be >= 1 at this point.  This is used to chain execution of the
2078         * pending work items for WORKER_NOT_RUNNING workers such as the
2079         * UNBOUND and CPU_INTENSIVE ones.
2080         */
2081        if (need_more_worker(pool))
2082                wake_up_worker(pool);
2083
2084        /*
2085         * Record the last pool and clear PENDING which should be the last
2086         * update to @work.  Also, do this inside @pool->lock so that
2087         * PENDING and queued state changes happen together while IRQ is
2088         * disabled.
2089         */
2090        set_work_pool_and_clear_pending(work, pool->id);
2091
2092        spin_unlock_irq(&pool->lock);
2093
2094        lock_map_acquire_read(&pwq->wq->lockdep_map);
2095        lock_map_acquire(&lockdep_map);
2096        trace_workqueue_execute_start(work);
2097        worker->current_func(work);
2098        /*
2099         * While we must be careful to not use "work" after this, the trace
2100         * point will only record its address.
2101         */
2102        trace_workqueue_execute_end(work);
2103        lock_map_release(&lockdep_map);
2104        lock_map_release(&pwq->wq->lockdep_map);
2105
2106        if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
2107                pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
2108                       "     last function: %pf\n",
2109                       current->comm, preempt_count(), task_pid_nr(current),
2110                       worker->current_func);
2111                debug_show_held_locks(current);
2112                dump_stack();
2113        }
2114
2115        /*
2116         * The following prevents a kworker from hogging CPU on !PREEMPT
2117         * kernels, where a requeueing work item waiting for something to
2118         * happen could deadlock with stop_machine as such work item could
2119         * indefinitely requeue itself while all other CPUs are trapped in
2120         * stop_machine. At the same time, report a quiescent RCU state so
2121         * the same condition doesn't freeze RCU.
2122         */
2123        cond_resched_rcu_qs();
2124
2125        spin_lock_irq(&pool->lock);
2126
2127        /* clear cpu intensive status */
2128        if (unlikely(cpu_intensive))
2129                worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
2130
2131        /* we're done with it, release */
2132        hash_del(&worker->hentry);
2133        worker->current_work = NULL;
2134        worker->current_func = NULL;
2135        worker->current_pwq = NULL;
2136        worker->desc_valid = false;
2137        pwq_dec_nr_in_flight(pwq, work_color);
2138}
2139
2140/**
2141 * process_scheduled_works - process scheduled works
2142 * @worker: self
2143 *
2144 * Process all scheduled works.  Please note that the scheduled list
2145 * may change while processing a work, so this function repeatedly
2146 * fetches a work from the top and executes it.
2147 *
2148 * CONTEXT:
2149 * spin_lock_irq(pool->lock) which may be released and regrabbed
2150 * multiple times.
2151 */
2152static void process_scheduled_works(struct worker *worker)
2153{
2154        while (!list_empty(&worker->scheduled)) {
2155                struct work_struct *work = list_first_entry(&worker->scheduled,
2156                                                struct work_struct, entry);
2157                process_one_work(worker, work);
2158        }
2159}
2160
2161/**
2162 * worker_thread - the worker thread function
2163 * @__worker: self
2164 *
2165 * The worker thread function.  All workers belong to a worker_pool -
2166 * either a per-cpu one or dynamic unbound one.  These workers process all
2167 * work items regardless of their specific target workqueue.  The only
2168 * exception is work items which belong to workqueues with a rescuer which
2169 * will be explained in rescuer_thread().
2170 *
2171 * Return: 0
2172 */
2173static int worker_thread(void *__worker)
2174{
2175        struct worker *worker = __worker;
2176        struct worker_pool *pool = worker->pool;
2177
2178        /* tell the scheduler that this is a workqueue worker */
2179        worker->task->flags |= PF_WQ_WORKER;
2180woke_up:
2181        spin_lock_irq(&pool->lock);
2182
2183        /* am I supposed to die? */
2184        if (unlikely(worker->flags & WORKER_DIE)) {
2185                spin_unlock_irq(&pool->lock);
2186                WARN_ON_ONCE(!list_empty(&worker->entry));
2187                worker->task->flags &= ~PF_WQ_WORKER;
2188
2189                set_task_comm(worker->task, "kworker/dying");
2190                ida_simple_remove(&pool->worker_ida, worker->id);
2191                worker_detach_from_pool(worker, pool);
2192                kfree(worker);
2193                return 0;
2194        }
2195
2196        worker_leave_idle(worker);
2197recheck:
2198        /* no more worker necessary? */
2199        if (!need_more_worker(pool))
2200                goto sleep;
2201
2202        /* do we need to manage? */
2203        if (unlikely(!may_start_working(pool)) && manage_workers(worker))
2204                goto recheck;
2205
2206        /*
2207         * ->scheduled list can only be filled while a worker is
2208         * preparing to process a work or actually processing it.
2209         * Make sure nobody diddled with it while I was sleeping.
2210         */
2211        WARN_ON_ONCE(!list_empty(&worker->scheduled));
2212
2213        /*
2214         * Finish PREP stage.  We're guaranteed to have at least one idle
2215         * worker or that someone else has already assumed the manager
2216         * role.  This is where @worker starts participating in concurrency
2217         * management if applicable and concurrency management is restored
2218         * after being rebound.  See rebind_workers() for details.
2219         */
2220        worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
2221
2222        do {
2223                struct work_struct *work =
2224                        list_first_entry(&pool->worklist,
2225                                         struct work_struct, entry);
2226
2227                pool->watchdog_ts = jiffies;
2228
2229                if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
2230                        /* optimization path, not strictly necessary */
2231                        process_one_work(worker, work);
2232                        if (unlikely(!list_empty(&worker->scheduled)))
2233                                process_scheduled_works(worker);
2234                } else {
2235                        move_linked_works(work, &worker->scheduled, NULL);
2236                        process_scheduled_works(worker);
2237                }
2238        } while (keep_working(pool));
2239
2240        worker_set_flags(worker, WORKER_PREP);
2241sleep:
2242        /*
2243         * pool->lock is held and there's no work to process and no need to
2244         * manage, sleep.  Workers are woken up only while holding
2245         * pool->lock or from local cpu, so setting the current state
2246         * before releasing pool->lock is enough to prevent losing any
2247         * event.
2248         */
2249        worker_enter_idle(worker);
2250        __set_current_state(TASK_INTERRUPTIBLE);
2251        spin_unlock_irq(&pool->lock);
2252        schedule();
2253        goto woke_up;
2254}
2255
2256/**
2257 * rescuer_thread - the rescuer thread function
2258 * @__rescuer: self
2259 *
2260 * Workqueue rescuer thread function.  There's one rescuer for each
2261 * workqueue which has WQ_MEM_RECLAIM set.
2262 *
2263 * Regular work processing on a pool may block trying to create a new
2264 * worker which uses GFP_KERNEL allocation which has slight chance of
2265 * developing into deadlock if some works currently on the same queue
2266 * need to be processed to satisfy the GFP_KERNEL allocation.  This is
2267 * the problem rescuer solves.
2268 *
2269 * When such condition is possible, the pool summons rescuers of all
2270 * workqueues which have works queued on the pool and let them process
2271 * those works so that forward progress can be guaranteed.
2272 *
2273 * This should happen rarely.
2274 *
2275 * Return: 0
2276 */
2277static int rescuer_thread(void *__rescuer)
2278{
2279        struct worker *rescuer = __rescuer;
2280        struct workqueue_struct *wq = rescuer->rescue_wq;
2281        struct list_head *scheduled = &rescuer->scheduled;
2282        bool should_stop;
2283
2284        set_user_nice(current, RESCUER_NICE_LEVEL);
2285
2286        /*
2287         * Mark rescuer as worker too.  As WORKER_PREP is never cleared, it
2288         * doesn't participate in concurrency management.
2289         */
2290        rescuer->task->flags |= PF_WQ_WORKER;
2291repeat:
2292        set_current_state(TASK_INTERRUPTIBLE);
2293
2294        /*
2295         * By the time the rescuer is requested to stop, the workqueue
2296         * shouldn't have any work pending, but @wq->maydays may still have
2297         * pwq(s) queued.  This can happen by non-rescuer workers consuming
2298         * all the work items before the rescuer got to them.  Go through
2299         * @wq->maydays processing before acting on should_stop so that the
2300         * list is always empty on exit.
2301         */
2302        should_stop = kthread_should_stop();
2303
2304        /* see whether any pwq is asking for help */
2305        spin_lock_irq(&wq_mayday_lock);
2306
2307        while (!list_empty(&wq->maydays)) {
2308                struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
2309                                        struct pool_workqueue, mayday_node);
2310                struct worker_pool *pool = pwq->pool;
2311                struct work_struct *work, *n;
2312                bool first = true;
2313
2314                __set_current_state(TASK_RUNNING);
2315                list_del_init(&pwq->mayday_node);
2316
2317                spin_unlock_irq(&wq_mayday_lock);
2318
2319                worker_attach_to_pool(rescuer, pool);
2320
2321                spin_lock_irq(&pool->lock);
2322                rescuer->pool = pool;
2323
2324                /*
2325                 * Slurp in all works issued via this workqueue and
2326                 * process'em.
2327                 */
2328                WARN_ON_ONCE(!list_empty(scheduled));
2329                list_for_each_entry_safe(work, n, &pool->worklist, entry) {
2330                        if (get_work_pwq(work) == pwq) {
2331                                if (first)
2332                                        pool->watchdog_ts = jiffies;
2333                                move_linked_works(work, scheduled, &n);
2334                        }
2335                        first = false;
2336                }
2337
2338                if (!list_empty(scheduled)) {
2339                        process_scheduled_works(rescuer);
2340
2341                        /*
2342                         * The above execution of rescued work items could
2343                         * have created more to rescue through
2344                         * pwq_activate_first_delayed() or chained
2345                         * queueing.  Let's put @pwq back on mayday list so
2346                         * that such back-to-back work items, which may be
2347                         * being used to relieve memory pressure, don't
2348                         * incur MAYDAY_INTERVAL delay inbetween.
2349                         */
2350                        if (need_to_create_worker(pool)) {
2351                                spin_lock(&wq_mayday_lock);
2352                                get_pwq(pwq);
2353                                list_move_tail(&pwq->mayday_node, &wq->maydays);
2354                                spin_unlock(&wq_mayday_lock);
2355                        }
2356                }
2357
2358                /*
2359                 * Put the reference grabbed by send_mayday().  @pool won't
2360                 * go away while we're still attached to it.
2361                 */
2362                put_pwq(pwq);
2363
2364                /*
2365                 * Leave this pool.  If need_more_worker() is %true, notify a
2366                 * regular worker; otherwise, we end up with 0 concurrency
2367                 * and stalling the execution.
2368                 */
2369                if (need_more_worker(pool))
2370                        wake_up_worker(pool);
2371
2372                rescuer->pool = NULL;
2373                spin_unlock_irq(&pool->lock);
2374
2375                worker_detach_from_pool(rescuer, pool);
2376
2377                spin_lock_irq(&wq_mayday_lock);
2378        }
2379
2380        spin_unlock_irq(&wq_mayday_lock);
2381
2382        if (should_stop) {
2383                __set_current_state(TASK_RUNNING);
2384                rescuer->task->flags &= ~PF_WQ_WORKER;
2385                return 0;
2386        }
2387
2388        /* rescuers should never participate in concurrency management */
2389        WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
2390        schedule();
2391        goto repeat;
2392}
2393
2394/**
2395 * check_flush_dependency - check for flush dependency sanity
2396 * @target_wq: workqueue being flushed
2397 * @target_work: work item being flushed (NULL for workqueue flushes)
2398 *
2399 * %current is trying to flush the whole @target_wq or @target_work on it.
2400 * If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not
2401 * reclaiming memory or running on a workqueue which doesn't have
2402 * %WQ_MEM_RECLAIM as that can break forward-progress guarantee leading to
2403 * a deadlock.
2404 */
2405static void check_flush_dependency(struct workqueue_struct *target_wq,
2406                                   struct work_struct *target_work)
2407{
2408        work_func_t target_func = target_work ? target_work->func : NULL;
2409        struct worker *worker;
2410
2411        if (target_wq->flags & WQ_MEM_RECLAIM)
2412                return;
2413
2414        worker = current_wq_worker();
2415
2416        WARN_ONCE(current->flags & PF_MEMALLOC,
2417                  "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%pf",
2418                  current->pid, current->comm, target_wq->name, target_func);
2419        WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
2420                              (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM),
2421                  "workqueue: WQ_MEM_RECLAIM %s:%pf is flushing !WQ_MEM_RECLAIM %s:%pf",
2422                  worker->current_pwq->wq->name, worker->current_func,
2423                  target_wq->name, target_func);
2424}
2425
2426struct wq_barrier {
2427        struct work_struct      work;
2428        struct completion       done;
2429        struct task_struct      *task;  /* purely informational */
2430};
2431
2432static void wq_barrier_func(struct work_struct *work)
2433{
2434        struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
2435        complete(&barr->done);
2436}
2437
2438/**
2439 * insert_wq_barrier - insert a barrier work
2440 * @pwq: pwq to insert barrier into
2441 * @barr: wq_barrier to insert
2442 * @target: target work to attach @barr to
2443 * @worker: worker currently executing @target, NULL if @target is not executing
2444 *
2445 * @barr is linked to @target such that @barr is completed only after
2446 * @target finishes execution.  Please note that the ordering
2447 * guarantee is observed only with respect to @target and on the local
2448 * cpu.
2449 *
2450 * Currently, a queued barrier can't be canceled.  This is because
2451 * try_to_grab_pending() can't determine whether the work to be
2452 * grabbed is at the head of the queue and thus can't clear LINKED
2453 * flag of the previous work while there must be a valid next work
2454 * after a work with LINKED flag set.
2455 *
2456 * Note that when @worker is non-NULL, @target may be modified
2457 * underneath us, so we can't reliably determine pwq from @target.
2458 *
2459 * CONTEXT:
2460 * spin_lock_irq(pool->lock).
2461 */
2462static void insert_wq_barrier(struct pool_workqueue *pwq,
2463                              struct wq_barrier *barr,
2464                              struct work_struct *target, struct worker *worker)
2465{
2466        struct list_head *head;
2467        unsigned int linked = 0;
2468
2469        /*
2470         * debugobject calls are safe here even with pool->lock locked
2471         * as we know for sure that this will not trigger any of the
2472         * checks and call back into the fixup functions where we
2473         * might deadlock.
2474         */
2475        INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
2476        __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
2477        init_completion(&barr->done);
2478        barr->task = current;
2479
2480        /*
2481         * If @target is currently being executed, schedule the
2482         * barrier to the worker; otherwise, put it after @target.
2483         */
2484        if (worker)
2485                head = worker->scheduled.next;
2486        else {
2487                unsigned long *bits = work_data_bits(target);
2488
2489                head = target->entry.next;
2490                /* there can already be other linked works, inherit and set */
2491                linked = *bits & WORK_STRUCT_LINKED;
2492                __set_bit(WORK_STRUCT_LINKED_BIT, bits);
2493        }
2494
2495        debug_work_activate(&barr->work);
2496        insert_work(pwq, &barr->work, head,
2497                    work_color_to_flags(WORK_NO_COLOR) | linked);
2498}
2499
2500/**
2501 * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
2502 * @wq: workqueue being flushed
2503 * @flush_color: new flush color, < 0 for no-op
2504 * @work_color: new work color, < 0 for no-op
2505 *
2506 * Prepare pwqs for workqueue flushing.
2507 *
2508 * If @flush_color is non-negative, flush_color on all pwqs should be
2509 * -1.  If no pwq has in-flight commands at the specified color, all
2510 * pwq->flush_color's stay at -1 and %false is returned.  If any pwq
2511 * has in flight commands, its pwq->flush_color is set to
2512 * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
2513 * wakeup logic is armed and %true is returned.
2514 *
2515 * The caller should have initialized @wq->first_flusher prior to
2516 * calling this function with non-negative @flush_color.  If
2517 * @flush_color is negative, no flush color update is done and %false
2518 * is returned.
2519 *
2520 * If @work_color is non-negative, all pwqs should have the same
2521 * work_color which is previous to @work_color and all will be
2522 * advanced to @work_color.
2523 *
2524 * CONTEXT:
2525 * mutex_lock(wq->mutex).
2526 *
2527 * Return:
2528 * %true if @flush_color >= 0 and there's something to flush.  %false
2529 * otherwise.
2530 */
2531static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
2532                                      int flush_color, int work_color)
2533{
2534        bool wait = false;
2535        struct pool_workqueue *pwq;
2536
2537        if (flush_color >= 0) {
2538                WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
2539                atomic_set(&wq->nr_pwqs_to_flush, 1);
2540        }
2541
2542        for_each_pwq(pwq, wq) {
2543                struct worker_pool *pool = pwq->pool;
2544
2545                spin_lock_irq(&pool->lock);
2546
2547                if (flush_color >= 0) {
2548                        WARN_ON_ONCE(pwq->flush_color != -1);
2549
2550                        if (pwq->nr_in_flight[flush_color]) {
2551                                pwq->flush_color = flush_color;
2552                                atomic_inc(&wq->nr_pwqs_to_flush);
2553                                wait = true;
2554                        }
2555                }
2556
2557                if (work_color >= 0) {
2558                        WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
2559                        pwq->work_color = work_color;
2560                }
2561
2562                spin_unlock_irq(&pool->lock);
2563        }
2564
2565        if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
2566                complete(&wq->first_flusher->done);
2567
2568        return wait;
2569}
2570
2571/**
2572 * flush_workqueue - ensure that any scheduled work has run to completion.
2573 * @wq: workqueue to flush
2574 *
2575 * This function sleeps until all work items which were queued on entry
2576 * have finished execution, but it is not livelocked by new incoming ones.
2577 */
2578void flush_workqueue(struct workqueue_struct *wq)
2579{
2580        struct wq_flusher this_flusher = {
2581                .list = LIST_HEAD_INIT(this_flusher.list),
2582                .flush_color = -1,
2583                .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
2584        };
2585        int next_color;
2586
2587        if (WARN_ON(!wq_online))
2588                return;
2589
2590        lock_map_acquire(&wq->lockdep_map);
2591        lock_map_release(&wq->lockdep_map);
2592
2593        mutex_lock(&wq->mutex);
2594
2595        /*
2596         * Start-to-wait phase
2597         */
2598        next_color = work_next_color(wq->work_color);
2599
2600        if (next_color != wq->flush_color) {
2601                /*
2602                 * Color space is not full.  The current work_color
2603                 * becomes our flush_color and work_color is advanced
2604                 * by one.
2605                 */
2606                WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
2607                this_flusher.flush_color = wq->work_color;
2608                wq->work_color = next_color;
2609
2610                if (!wq->first_flusher) {
2611                        /* no flush in progress, become the first flusher */
2612                        WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
2613
2614                        wq->first_flusher = &this_flusher;
2615
2616                        if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
2617                                                       wq->work_color)) {
2618                                /* nothing to flush, done */
2619                                wq->flush_color = next_color;
2620                                wq->first_flusher = NULL;
2621                                goto out_unlock;
2622                        }
2623                } else {
2624                        /* wait in queue */
2625                        WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
2626                        list_add_tail(&this_flusher.list, &wq->flusher_queue);
2627                        flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
2628                }
2629        } else {
2630                /*
2631                 * Oops, color space is full, wait on overflow queue.
2632                 * The next flush completion will assign us
2633                 * flush_color and transfer to flusher_queue.
2634                 */
2635                list_add_tail(&this_flusher.list, &wq->flusher_overflow);
2636        }
2637
2638        check_flush_dependency(wq, NULL);
2639
2640        mutex_unlock(&wq->mutex);
2641
2642        wait_for_completion(&this_flusher.done);
2643
2644        /*
2645         * Wake-up-and-cascade phase
2646         *
2647         * First flushers are responsible for cascading flushes and
2648         * handling overflow.  Non-first flushers can simply return.
2649         */
2650        if (wq->first_flusher != &this_flusher)
2651                return;
2652
2653        mutex_lock(&wq->mutex);
2654
2655        /* we might have raced, check again with mutex held */
2656        if (wq->first_flusher != &this_flusher)
2657                goto out_unlock;
2658
2659        wq->first_flusher = NULL;
2660
2661        WARN_ON_ONCE(!list_empty(&this_flusher.list));
2662        WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
2663
2664        while (true) {
2665                struct wq_flusher *next, *tmp;
2666
2667                /* complete all the flushers sharing the current flush color */
2668                list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
2669                        if (next->flush_color != wq->flush_color)
2670                                break;
2671                        list_del_init(&next->list);
2672                        complete(&next->done);
2673                }
2674
2675                WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
2676                             wq->flush_color != work_next_color(wq->work_color));
2677
2678                /* this flush_color is finished, advance by one */
2679                wq->flush_color = work_next_color(wq->flush_color);
2680
2681                /* one color has been freed, handle overflow queue */
2682                if (!list_empty(&wq->flusher_overflow)) {
2683                        /*
2684                         * Assign the same color to all overflowed
2685                         * flushers, advance work_color and append to
2686                         * flusher_queue.  This is the start-to-wait
2687                         * phase for these overflowed flushers.
2688                         */
2689                        list_for_each_entry(tmp, &wq->flusher_overflow, list)
2690                                tmp->flush_color = wq->work_color;
2691
2692                        wq->work_color = work_next_color(wq->work_color);
2693
2694                        list_splice_tail_init(&wq->flusher_overflow,
2695                                              &wq->flusher_queue);
2696                        flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
2697                }
2698
2699                if (list_empty(&wq->flusher_queue)) {
2700                        WARN_ON_ONCE(wq->flush_color != wq->work_color);
2701                        break;
2702                }
2703
2704                /*
2705                 * Need to flush more colors.  Make the next flusher
2706                 * the new first flusher and arm pwqs.
2707                 */
2708                WARN_ON_ONCE(wq->flush_color == wq->work_color);
2709                WARN_ON_ONCE(wq->flush_color != next->flush_color);
2710
2711                list_del_init(&next->list);
2712                wq->first_flusher = next;
2713
2714                if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
2715                        break;
2716
2717                /*
2718                 * Meh... this color is already done, clear first
2719                 * flusher and repeat cascading.
2720                 */
2721                wq->first_flusher = NULL;
2722        }
2723
2724out_unlock:
2725        mutex_unlock(&wq->mutex);
2726}
2727EXPORT_SYMBOL(flush_workqueue);
2728
2729/**
2730 * drain_workqueue - drain a workqueue
2731 * @wq: workqueue to drain
2732 *
2733 * Wait until the workqueue becomes empty.  While draining is in progress,
2734 * only chain queueing is allowed.  IOW, only currently pending or running
2735 * work items on @wq can queue further work items on it.  @wq is flushed
2736 * repeatedly until it becomes empty.  The number of flushing is determined
2737 * by the depth of chaining and should be relatively short.  Whine if it
2738 * takes too long.
2739 */
2740void drain_workqueue(struct workqueue_struct *wq)
2741{
2742        unsigned int flush_cnt = 0;
2743        struct pool_workqueue *pwq;
2744
2745        /*
2746         * __queue_work() needs to test whether there are drainers, is much
2747         * hotter than drain_workqueue() and already looks at @wq->flags.
2748         * Use __WQ_DRAINING so that queue doesn't have to check nr_drainers.
2749         */
2750        mutex_lock(&wq->mutex);
2751        if (!wq->nr_drainers++)
2752                wq->flags |= __WQ_DRAINING;
2753        mutex_unlock(&wq->mutex);
2754reflush:
2755        flush_workqueue(wq);
2756
2757        mutex_lock(&wq->mutex);
2758
2759        for_each_pwq(pwq, wq) {
2760                bool drained;
2761
2762                spin_lock_irq(&pwq->pool->lock);
2763                drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
2764                spin_unlock_irq(&pwq->pool->lock);
2765
2766                if (drained)
2767                        continue;
2768
2769                if (++flush_cnt == 10 ||
2770                    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
2771                        pr_warn("workqueue %s: drain_workqueue() isn't complete after %u tries\n",
2772                                wq->name, flush_cnt);
2773
2774                mutex_unlock(&wq->mutex);
2775                goto reflush;
2776        }
2777
2778        if (!--wq->nr_drainers)
2779                wq->flags &= ~__WQ_DRAINING;
2780        mutex_unlock(&wq->mutex);
2781}
2782EXPORT_SYMBOL_GPL(drain_workqueue);
2783
2784static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
2785{
2786        struct worker *worker = NULL;
2787        struct worker_pool *pool;
2788        struct pool_workqueue *pwq;
2789
2790        might_sleep();
2791
2792        local_irq_disable();
2793        pool = get_work_pool(work);
2794        if (!pool) {
2795                local_irq_enable();
2796                return false;
2797        }
2798
2799        spin_lock(&pool->lock);
2800        /* see the comment in try_to_grab_pending() with the same code */
2801        pwq = get_work_pwq(work);
2802        if (pwq) {
2803                if (unlikely(pwq->pool != pool))
2804                        goto already_gone;
2805        } else {
2806                worker = find_worker_executing_work(pool, work);
2807                if (!worker)
2808                        goto already_gone;
2809                pwq = worker->current_pwq;
2810        }
2811
2812        check_flush_dependency(pwq->wq, work);
2813
2814        insert_wq_barrier(pwq, barr, work, worker);
2815        spin_unlock_irq(&pool->lock);
2816
2817        /*
2818         * If @max_active is 1 or rescuer is in use, flushing another work
2819         * item on the same workqueue may lead to deadlock.  Make sure the
2820         * flusher is not running on the same workqueue by verifying write
2821         * access.
2822         */
2823        if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)
2824                lock_map_acquire(&pwq->wq->lockdep_map);
2825        else
2826                lock_map_acquire_read(&pwq->wq->lockdep_map);
2827        lock_map_release(&pwq->wq->lockdep_map);
2828
2829        return true;
2830already_gone:
2831        spin_unlock_irq(&pool->lock);
2832        return false;
2833}
2834
2835/**
2836 * flush_work - wait for a work to finish executing the last queueing instance
2837 * @work: the work to flush
2838 *
2839 * Wait until @work has finished execution.  @work is guaranteed to be idle
2840 * on return if it hasn't been requeued since flush started.
2841 *
2842 * Return:
2843 * %true if flush_work() waited for the work to finish execution,
2844 * %false if it was already idle.
2845 */
2846bool flush_work(struct work_struct *work)
2847{
2848        struct wq_barrier barr;
2849
2850        if (WARN_ON(!wq_online))
2851                return false;
2852
2853        lock_map_acquire(&work->lockdep_map);
2854        lock_map_release(&work->lockdep_map);
2855
2856        if (start_flush_work(work, &barr)) {
2857                wait_for_completion(&barr.done);
2858                destroy_work_on_stack(&barr.work);
2859                return true;
2860        } else {
2861                return false;
2862        }
2863}
2864EXPORT_SYMBOL_GPL(flush_work);
2865
2866struct cwt_wait {
2867        wait_queue_entry_t              wait;
2868        struct work_struct      *work;
2869};
2870
2871static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
2872{
2873        struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
2874
2875        if (cwait->work != key)
2876                return 0;
2877        return autoremove_wake_function(wait, mode, sync, key);
2878}
2879
2880static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
2881{
2882        static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
2883        unsigned long flags;
2884        int ret;
2885
2886        do {
2887                ret = try_to_grab_pending(work, is_dwork, &flags);
2888                /*
2889                 * If someone else is already canceling, wait for it to
2890                 * finish.  flush_work() doesn't work for PREEMPT_NONE
2891                 * because we may get scheduled between @work's completion
2892                 * and the other canceling task resuming and clearing
2893                 * CANCELING - flush_work() will return false immediately
2894                 * as @work is no longer busy, try_to_grab_pending() will
2895                 * return -ENOENT as @work is still being canceled and the
2896                 * other canceling task won't be able to clear CANCELING as
2897                 * we're hogging the CPU.
2898                 *
2899                 * Let's wait for completion using a waitqueue.  As this
2900                 * may lead to the thundering herd problem, use a custom
2901                 * wake function which matches @work along with exclusive
2902                 * wait and wakeup.
2903                 */
2904                if (unlikely(ret == -ENOENT)) {
2905                        struct cwt_wait cwait;
2906
2907                        init_wait(&cwait.wait);
2908                        cwait.wait.func = cwt_wakefn;
2909                        cwait.work = work;
2910
2911                        prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
2912                                                  TASK_UNINTERRUPTIBLE);
2913                        if (work_is_canceling(work))
2914                                schedule();
2915                        finish_wait(&cancel_waitq, &cwait.wait);
2916                }
2917        } while (unlikely(ret < 0));
2918
2919        /* tell other tasks trying to grab @work to back off */
2920        mark_work_canceling(work);
2921        local_irq_restore(flags);
2922
2923        /*
2924         * This allows canceling during early boot.  We know that @work
2925         * isn't executing.
2926         */
2927        if (wq_online)
2928                flush_work(work);
2929
2930        clear_work_data(work);
2931
2932        /*
2933         * Paired with prepare_to_wait() above so that either
2934         * waitqueue_active() is visible here or !work_is_canceling() is
2935         * visible there.
2936         */
2937        smp_mb();
2938        if (waitqueue_active(&cancel_waitq))
2939                __wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
2940
2941        return ret;
2942}
2943
2944/**
2945 * cancel_work_sync - cancel a work and wait for it to finish
2946 * @work: the work to cancel
2947 *
2948 * Cancel @work and wait for its execution to finish.  This function
2949 * can be used even if the work re-queues itself or migrates to
2950 * another workqueue.  On return from this function, @work is
2951 * guaranteed to be not pending or executing on any CPU.
2952 *
2953 * cancel_work_sync(&delayed_work->work) must not be used for
2954 * delayed_work's.  Use cancel_delayed_work_sync() instead.
2955 *
2956 * The caller must ensure that the workqueue on which @work was last
2957 * queued can't be destroyed before this function returns.
2958 *
2959 * Return:
2960 * %true if @work was pending, %false otherwise.
2961 */
2962bool cancel_work_sync(struct work_struct *work)
2963{
2964        return __cancel_work_timer(work, false);
2965}
2966EXPORT_SYMBOL_GPL(cancel_work_sync);
2967
2968/**
2969 * flush_delayed_work - wait for a dwork to finish executing the last queueing
2970 * @dwork: the delayed work to flush
2971 *
2972 * Delayed timer is cancelled and the pending work is queued for
2973 * immediate execution.  Like flush_work(), this function only
2974 * considers the last queueing instance of @dwork.
2975 *
2976 * Return:
2977 * %true if flush_work() waited for the work to finish execution,
2978 * %false if it was already idle.
2979 */
2980bool flush_delayed_work(struct delayed_work *dwork)
2981{
2982        local_irq_disable();
2983        if (del_timer_sync(&dwork->timer))
2984                __queue_work(dwork->cpu, dwork->wq, &dwork->work);
2985        local_irq_enable();
2986        return flush_work(&dwork->work);
2987}
2988EXPORT_SYMBOL(flush_delayed_work);
2989
2990static bool __cancel_work(struct work_struct *work, bool is_dwork)
2991{
2992        unsigned long flags;
2993        int ret;
2994
2995        do {
2996                ret = try_to_grab_pending(work, is_dwork, &flags);
2997        } while (unlikely(ret == -EAGAIN));
2998
2999        if (unlikely(ret < 0))
3000                return false;
3001
3002        set_work_pool_and_clear_pending(work, get_work_pool_id(work));
3003        local_irq_restore(flags);
3004        return ret;
3005}
3006
3007/*
3008 * See cancel_delayed_work()
3009 */
3010bool cancel_work(struct work_struct *work)
3011{
3012        return __cancel_work(work, false);
3013}
3014
3015/**
3016 * cancel_delayed_work - cancel a delayed work
3017 * @dwork: delayed_work to cancel
3018 *
3019 * Kill off a pending delayed_work.
3020 *
3021 * Return: %true if @dwork was pending and canceled; %false if it wasn't
3022 * pending.
3023 *
3024 * Note:
3025 * The work callback function may still be running on return, unless
3026 * it returns %true and the work doesn't re-arm itself.  Explicitly flush or
3027 * use cancel_delayed_work_sync() to wait on it.
3028 *
3029 * This function is safe to call from any context including IRQ handler.
3030 */
3031bool cancel_delayed_work(struct delayed_work *dwork)
3032{
3033        return __cancel_work(&dwork->work, true);
3034}
3035EXPORT_SYMBOL(cancel_delayed_work);
3036
3037/**
3038 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
3039 * @dwork: the delayed work cancel
3040 *
3041 * This is cancel_work_sync() for delayed works.
3042 *
3043 * Return:
3044 * %true if @dwork was pending, %false otherwise.
3045 */
3046bool cancel_delayed_work_sync(struct delayed_work *dwork)
3047{
3048        return __cancel_work_timer(&dwork->work, true);
3049}
3050EXPORT_SYMBOL(cancel_delayed_work_sync);
3051
3052/**
3053 * schedule_on_each_cpu - execute a function synchronously on each online CPU
3054 * @func: the function to call
3055 *
3056 * schedule_on_each_cpu() executes @func on each online CPU using the
3057 * system workqueue and blocks until all CPUs have completed.
3058 * schedule_on_each_cpu() is very slow.
3059 *
3060 * Return:
3061 * 0 on success, -errno on failure.
3062 */
3063int schedule_on_each_cpu(work_func_t func)
3064{
3065        int cpu;
3066        struct work_struct __percpu *works;
3067
3068        works = alloc_percpu(struct work_struct);
3069        if (!works)
3070                return -ENOMEM;
3071
3072        get_online_cpus();
3073
3074        for_each_online_cpu(cpu) {
3075                struct work_struct *work = per_cpu_ptr(works, cpu);
3076
3077                INIT_WORK(work, func);
3078                schedule_work_on(cpu, work);
3079        }
3080
3081        for_each_online_cpu(cpu)
3082                flush_work(per_cpu_ptr(works, cpu));
3083
3084        put_online_cpus();
3085        free_percpu(works);
3086        return 0;
3087}
3088
3089/**
3090 * execute_in_process_context - reliably execute the routine with user context
3091 * @fn:         the function to execute
3092 * @ew:         guaranteed storage for the execute work structure (must
3093 *              be available when the work executes)
3094 *
3095 * Executes the function immediately if process context is available,
3096 * otherwise schedules the function for delayed execution.
3097 *
3098 * Return:      0 - function was executed
3099 *              1 - function was scheduled for execution
3100 */
3101int execute_in_process_context(work_func_t fn, struct execute_work *ew)
3102{
3103        if (!in_interrupt()) {
3104                fn(&ew->work);
3105                return 0;
3106        }
3107
3108        INIT_WORK(&ew->work, fn);
3109        schedule_work(&ew->work);
3110
3111        return 1;
3112}
3113EXPORT_SYMBOL_GPL(execute_in_process_context);
3114
3115/**
3116 * free_workqueue_attrs - free a workqueue_attrs
3117 * @attrs: workqueue_attrs to free
3118 *
3119 * Undo alloc_workqueue_attrs().
3120 */
3121void free_workqueue_attrs(struct workqueue_attrs *attrs)
3122{
3123        if (attrs) {
3124                free_cpumask_var(attrs->cpumask);
3125                kfree(attrs);
3126        }
3127}
3128
3129/**
3130 * alloc_workqueue_attrs - allocate a workqueue_attrs
3131 * @gfp_mask: allocation mask to use
3132 *
3133 * Allocate a new workqueue_attrs, initialize with default settings and
3134 * return it.
3135 *
3136 * Return: The allocated new workqueue_attr on success. %NULL on failure.
3137 */
3138struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask)
3139{
3140        struct workqueue_attrs *attrs;
3141
3142        attrs = kzalloc(sizeof(*attrs), gfp_mask);
3143        if (!attrs)
3144                goto fail;
3145        if (!alloc_cpumask_var(&attrs->cpumask, gfp_mask))
3146                goto fail;
3147
3148        cpumask_copy(attrs->cpumask, cpu_possible_mask);
3149        return attrs;
3150fail:
3151        free_workqueue_attrs(attrs);
3152        return NULL;
3153}
3154
3155static void copy_workqueue_attrs(struct workqueue_attrs *to,
3156                                 const struct workqueue_attrs *from)
3157{
3158        to->nice = from->nice;
3159        cpumask_copy(to->cpumask, from->cpumask);
3160        /*
3161         * Unlike hash and equality test, this function doesn't ignore
3162         * ->no_numa as it is used for both pool and wq attrs.  Instead,
3163         * get_unbound_pool() explicitly clears ->no_numa after copying.
3164         */
3165        to->no_numa = from->no_numa;
3166}
3167
3168/* hash value of the content of @attr */
3169static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
3170{
3171        u32 hash = 0;
3172
3173        hash = jhash_1word(attrs->nice, hash);
3174        hash = jhash(cpumask_bits(attrs->cpumask),
3175                     BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
3176        return hash;
3177}
3178
3179/* content equality test */
3180static bool wqattrs_equal(const struct workqueue_attrs *a,
3181                          const struct workqueue_attrs *b)
3182{
3183        if (a->nice != b->nice)
3184                return false;
3185        if (!cpumask_equal(a->cpumask, b->cpumask))
3186                return false;
3187        return true;
3188}
3189
3190/**
3191 * init_worker_pool - initialize a newly zalloc'd worker_pool
3192 * @pool: worker_pool to initialize
3193 *
3194 * Initialize a newly zalloc'd @pool.  It also allocates @pool->attrs.
3195 *
3196 * Return: 0 on success, -errno on failure.  Even on failure, all fields
3197 * inside @pool proper are initialized and put_unbound_pool() can be called
3198 * on @pool safely to release it.
3199 */
3200static int init_worker_pool(struct worker_pool *pool)
3201{
3202        spin_lock_init(&pool->lock);
3203        pool->id = -1;
3204        pool->cpu = -1;
3205        pool->node = NUMA_NO_NODE;
3206        pool->flags |= POOL_DISASSOCIATED;
3207        pool->watchdog_ts = jiffies;
3208        INIT_LIST_HEAD(&pool->worklist);
3209        INIT_LIST_HEAD(&pool->idle_list);
3210        hash_init(pool->busy_hash);
3211
3212        setup_deferrable_timer(&pool->idle_timer, idle_worker_timeout,
3213                               (unsigned long)pool);
3214
3215        setup_timer(&pool->mayday_timer, pool_mayday_timeout,
3216                    (unsigned long)pool);
3217
3218        mutex_init(&pool->manager_arb);
3219        mutex_init(&pool->attach_mutex);
3220        INIT_LIST_HEAD(&pool->workers);
3221
3222        ida_init(&pool->worker_ida);
3223        INIT_HLIST_NODE(&pool->hash_node);
3224        pool->refcnt = 1;
3225
3226        /* shouldn't fail above this point */
3227        pool->attrs = alloc_workqueue_attrs(GFP_KERNEL);
3228        if (!pool->attrs)
3229                return -ENOMEM;
3230        return 0;
3231}
3232
3233static void rcu_free_wq(struct rcu_head *rcu)
3234{
3235        struct workqueue_struct *wq =
3236                container_of(rcu, struct workqueue_struct, rcu);
3237
3238        if (!(wq->flags & WQ_UNBOUND))
3239                free_percpu(wq->cpu_pwqs);
3240        else
3241                free_workqueue_attrs(wq->unbound_attrs);
3242
3243        kfree(wq->rescuer);
3244        kfree(wq);
3245}
3246
3247static void rcu_free_pool(struct rcu_head *rcu)
3248{
3249        struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
3250
3251        ida_destroy(&pool->worker_ida);
3252        free_workqueue_attrs(pool->attrs);
3253        kfree(pool);
3254}
3255
3256/**
3257 * put_unbound_pool - put a worker_pool
3258 * @pool: worker_pool to put
3259 *
3260 * Put @pool.  If its refcnt reaches zero, it gets destroyed in sched-RCU
3261 * safe manner.  get_unbound_pool() calls this function on its failure path
3262 * and this function should be able to release pools which went through,
3263 * successfully or not, init_worker_pool().
3264 *
3265 * Should be called with wq_pool_mutex held.
3266 */
3267static void put_unbound_pool(struct worker_pool *pool)
3268{
3269        DECLARE_COMPLETION_ONSTACK(detach_completion);
3270        struct worker *worker;
3271
3272        lockdep_assert_held(&wq_pool_mutex);
3273
3274        if (--pool->refcnt)
3275                return;
3276
3277        /* sanity checks */
3278        if (WARN_ON(!(pool->cpu < 0)) ||
3279            WARN_ON(!list_empty(&pool->worklist)))
3280                return;
3281
3282        /* release id and unhash */
3283        if (pool->id >= 0)
3284                idr_remove(&worker_pool_idr, pool->id);
3285        hash_del(&pool->hash_node);
3286
3287        /*
3288         * Become the manager and destroy all workers.  Grabbing
3289         * manager_arb prevents @pool's workers from blocking on
3290         * attach_mutex.
3291         */
3292        mutex_lock(&pool->manager_arb);
3293
3294        spin_lock_irq(&pool->lock);
3295        while ((worker = first_idle_worker(pool)))
3296                destroy_worker(worker);
3297        WARN_ON(pool->nr_workers || pool->nr_idle);
3298        spin_unlock_irq(&pool->lock);
3299
3300        mutex_lock(&pool->attach_mutex);
3301        if (!list_empty(&pool->workers))
3302                pool->detach_completion = &detach_completion;
3303        mutex_unlock(&pool->attach_mutex);
3304
3305        if (pool->detach_completion)
3306                wait_for_completion(pool->detach_completion);
3307
3308        mutex_unlock(&pool->manager_arb);
3309
3310        /* shut down the timers */
3311        del_timer_sync(&pool->idle_timer);
3312        del_timer_sync(&pool->mayday_timer);
3313
3314        /* sched-RCU protected to allow dereferences from get_work_pool() */
3315        call_rcu_sched(&pool->rcu, rcu_free_pool);
3316}
3317
3318/**
3319 * get_unbound_pool - get a worker_pool with the specified attributes
3320 * @attrs: the attributes of the worker_pool to get
3321 *
3322 * Obtain a worker_pool which has the same attributes as @attrs, bump the
3323 * reference count and return it.  If there already is a matching
3324 * worker_pool, it will be used; otherwise, this function attempts to
3325 * create a new one.
3326 *
3327 * Should be called with wq_pool_mutex held.
3328 *
3329 * Return: On success, a worker_pool with the same attributes as @attrs.
3330 * On failure, %NULL.
3331 */
3332static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
3333{
3334        u32 hash = wqattrs_hash(attrs);
3335        struct worker_pool *pool;
3336        int node;
3337        int target_node = NUMA_NO_NODE;
3338
3339        lockdep_assert_held(&wq_pool_mutex);
3340
3341        /* do we already have a matching pool? */
3342        hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
3343                if (wqattrs_equal(pool->attrs, attrs)) {
3344                        pool->refcnt++;
3345                        return pool;
3346                }
3347        }
3348
3349        /* if cpumask is contained inside a NUMA node, we belong to that node */
3350        if (wq_numa_enabled) {
3351                for_each_node(node) {
3352                        if (cpumask_subset(attrs->cpumask,
3353                                           wq_numa_possible_cpumask[node])) {
3354                                target_node = node;
3355                                break;
3356                        }
3357                }
3358        }
3359
3360        /* nope, create a new one */
3361        pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, target_node);
3362        if (!pool || init_worker_pool(pool) < 0)
3363                goto fail;
3364
3365        lockdep_set_subclass(&pool->lock, 1);   /* see put_pwq() */
3366        copy_workqueue_attrs(pool->attrs, attrs);
3367        pool->node = target_node;
3368
3369        /*
3370         * no_numa isn't a worker_pool attribute, always clear it.  See
3371         * 'struct workqueue_attrs' comments for detail.
3372         */
3373        pool->attrs->no_numa = false;
3374
3375        if (worker_pool_assign_id(pool) < 0)
3376                goto fail;
3377
3378        /* create and start the initial worker */
3379        if (wq_online && !create_worker(pool))
3380                goto fail;
3381
3382        /* install */
3383        hash_add(unbound_pool_hash, &pool->hash_node, hash);
3384
3385        return pool;
3386fail:
3387        if (pool)
3388                put_unbound_pool(pool);
3389        return NULL;
3390}
3391
3392static void rcu_free_pwq(struct rcu_head *rcu)
3393{
3394        kmem_cache_free(pwq_cache,
3395                        container_of(rcu, struct pool_workqueue, rcu));
3396}
3397
3398/*
3399 * Scheduled on system_wq by put_pwq() when an unbound pwq hits zero refcnt
3400 * and needs to be destroyed.
3401 */
3402static void pwq_unbound_release_workfn(struct work_struct *work)
3403{
3404        struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
3405                                                  unbound_release_work);
3406        struct workqueue_struct *wq = pwq->wq;
3407        struct worker_pool *pool = pwq->pool;
3408        bool is_last;
3409
3410        if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
3411                return;
3412
3413        mutex_lock(&wq->mutex);
3414        list_del_rcu(&pwq->pwqs_node);
3415        is_last = list_empty(&wq->pwqs);
3416        mutex_unlock(&wq->mutex);
3417
3418        mutex_lock(&wq_pool_mutex);
3419        put_unbound_pool(pool);
3420        mutex_unlock(&wq_pool_mutex);
3421
3422        call_rcu_sched(&pwq->rcu, rcu_free_pwq);
3423
3424        /*
3425         * If we're the last pwq going away, @wq is already dead and no one
3426         * is gonna access it anymore.  Schedule RCU free.
3427         */
3428        if (is_last)
3429                call_rcu_sched(&wq->rcu, rcu_free_wq);
3430}
3431
3432/**
3433 * pwq_adjust_max_active - update a pwq's max_active to the current setting
3434 * @pwq: target pool_workqueue
3435 *
3436 * If @pwq isn't freezing, set @pwq->max_active to the associated
3437 * workqueue's saved_max_active and activate delayed work items
3438 * accordingly.  If @pwq is freezing, clear @pwq->max_active to zero.
3439 */
3440static void pwq_adjust_max_active(struct pool_workqueue *pwq)
3441{
3442        struct workqueue_struct *wq = pwq->wq;
3443        bool freezable = wq->flags & WQ_FREEZABLE;
3444        unsigned long flags;
3445
3446        /* for @wq->saved_max_active */
3447        lockdep_assert_held(&wq->mutex);
3448
3449        /* fast exit for non-freezable wqs */
3450        if (!freezable && pwq->max_active == wq->saved_max_active)
3451                return;
3452
3453        /* this function can be called during early boot w/ irq disabled */
3454        spin_lock_irqsave(&pwq->pool->lock, flags);
3455
3456        /*
3457         * During [un]freezing, the caller is responsible for ensuring that
3458         * this function is called at least once after @workqueue_freezing
3459         * is updated and visible.
3460         */
3461        if (!freezable || !workqueue_freezing) {
3462                pwq->max_active = wq->saved_max_active;
3463
3464                while (!list_empty(&pwq->delayed_works) &&
3465                       pwq->nr_active < pwq->max_active)
3466                        pwq_activate_first_delayed(pwq);
3467
3468                /*
3469                 * Need to kick a worker after thawed or an unbound wq's
3470                 * max_active is bumped.  It's a slow path.  Do it always.
3471                 */
3472                wake_up_worker(pwq->pool);
3473        } else {
3474                pwq->max_active = 0;
3475        }
3476
3477        spin_unlock_irqrestore(&pwq->pool->lock, flags);
3478}
3479
3480/* initialize newly alloced @pwq which is associated with @wq and @pool */
3481static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
3482                     struct worker_pool *pool)
3483{
3484        BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
3485
3486        memset(pwq, 0, sizeof(*pwq));
3487
3488        pwq->pool = pool;
3489        pwq->wq = wq;
3490        pwq->flush_color = -1;
3491        pwq->refcnt = 1;
3492        INIT_LIST_HEAD(&pwq->delayed_works);
3493        INIT_LIST_HEAD(&pwq->pwqs_node);
3494        INIT_LIST_HEAD(&pwq->mayday_node);
3495        INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
3496}
3497
3498/* sync @pwq with the current state of its associated wq and link it */
3499static void link_pwq(struct pool_workqueue *pwq)
3500{
3501        struct workqueue_struct *wq = pwq->wq;
3502
3503        lockdep_assert_held(&wq->mutex);
3504
3505        /* may be called multiple times, ignore if already linked */
3506        if (!list_empty(&pwq->pwqs_node))
3507                return;
3508
3509        /* set the matching work_color */
3510        pwq->work_color = wq->work_color;
3511
3512        /* sync max_active to the current setting */
3513        pwq_adjust_max_active(pwq);
3514
3515        /* link in @pwq */
3516        list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
3517}
3518
3519/* obtain a pool matching @attr and create a pwq associating the pool and @wq */
3520static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
3521                                        const struct workqueue_attrs *attrs)
3522{
3523        struct worker_pool *pool;
3524        struct pool_workqueue *pwq;
3525
3526        lockdep_assert_held(&wq_pool_mutex);
3527
3528        pool = get_unbound_pool(attrs);
3529        if (!pool)
3530                return NULL;
3531
3532        pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
3533        if (!pwq) {
3534                put_unbound_pool(pool);
3535                return NULL;
3536        }
3537
3538        init_pwq(pwq, wq, pool);
3539        return pwq;
3540}
3541
3542/**
3543 * wq_calc_node_cpumask - calculate a wq_attrs' cpumask for the specified node
3544 * @attrs: the wq_attrs of the default pwq of the target workqueue
3545 * @node: the target NUMA node
3546 * @cpu_going_down: if >= 0, the CPU to consider as offline
3547 * @cpumask: outarg, the resulting cpumask
3548 *
3549 * Calculate the cpumask a workqueue with @attrs should use on @node.  If
3550 * @cpu_going_down is >= 0, that cpu is considered offline during
3551 * calculation.  The result is stored in @cpumask.
3552 *
3553 * If NUMA affinity is not enabled, @attrs->cpumask is always used.  If
3554 * enabled and @node has online CPUs requested by @attrs, the returned
3555 * cpumask is the intersection of the possible CPUs of @node and
3556 * @attrs->cpumask.
3557 *
3558 * The caller is responsible for ensuring that the cpumask of @node stays
3559 * stable.
3560 *
3561 * Return: %true if the resulting @cpumask is different from @attrs->cpumask,
3562 * %false if equal.
3563 */
3564static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
3565                                 int cpu_going_down, cpumask_t *cpumask)
3566{
3567        if (!wq_numa_enabled || attrs->no_numa)
3568                goto use_dfl;
3569
3570        /* does @node have any online CPUs @attrs wants? */
3571        cpumask_and(cpumask, cpumask_of_node(node), attrs->cpumask);
3572        if (cpu_going_down >= 0)
3573                cpumask_clear_cpu(cpu_going_down, cpumask);
3574
3575        if (cpumask_empty(cpumask))
3576                goto use_dfl;
3577
3578        /* yeap, return possible CPUs in @node that @attrs wants */
3579        cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]);
3580
3581        if (cpumask_empty(cpumask)) {
3582                pr_warn_once("WARNING: workqueue cpumask: online intersect > "
3583                                "possible intersect\n");
3584                return false;
3585        }
3586
3587        return !cpumask_equal(cpumask, attrs->cpumask);
3588
3589use_dfl:
3590        cpumask_copy(cpumask, attrs->cpumask);
3591        return false;
3592}
3593
3594/* install @pwq into @wq's numa_pwq_tbl[] for @node and return the old pwq */
3595static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
3596                                                   int node,
3597                                                   struct pool_workqueue *pwq)
3598{
3599        struct pool_workqueue *old_pwq;
3600
3601        lockdep_assert_held(&wq_pool_mutex);
3602        lockdep_assert_held(&wq->mutex);
3603
3604        /* link_pwq() can handle duplicate calls */
3605        link_pwq(pwq);
3606
3607        old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
3608        rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
3609        return old_pwq;
3610}
3611
3612/* context to store the prepared attrs & pwqs before applying */
3613struct apply_wqattrs_ctx {
3614        struct workqueue_struct *wq;            /* target workqueue */
3615        struct workqueue_attrs  *attrs;         /* attrs to apply */
3616        struct list_head        list;           /* queued for batching commit */
3617        struct pool_workqueue   *dfl_pwq;
3618        struct pool_workqueue   *pwq_tbl[];
3619};
3620
3621/* free the resources after success or abort */
3622static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx)
3623{
3624        if (ctx) {
3625                int node;
3626
3627                for_each_node(node)
3628                        put_pwq_unlocked(ctx->pwq_tbl[node]);
3629                put_pwq_unlocked(ctx->dfl_pwq);
3630
3631                free_workqueue_attrs(ctx->attrs);
3632
3633                kfree(ctx);
3634        }
3635}
3636
3637/* allocate the attrs and pwqs for later installation */
3638static struct apply_wqattrs_ctx *
3639apply_wqattrs_prepare(struct workqueue_struct *wq,
3640                      const struct workqueue_attrs *attrs)
3641{
3642        struct apply_wqattrs_ctx *ctx;
3643        struct workqueue_attrs *new_attrs, *tmp_attrs;
3644        int node;
3645
3646        lockdep_assert_held(&wq_pool_mutex);
3647
3648        ctx = kzalloc(sizeof(*ctx) + nr_node_ids * sizeof(ctx->pwq_tbl[0]),
3649                      GFP_KERNEL);
3650
3651        new_attrs = alloc_workqueue_attrs(GFP_KERNEL);
3652        tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL);
3653        if (!ctx || !new_attrs || !tmp_attrs)
3654                goto out_free;
3655
3656        /*
3657         * Calculate the attrs of the default pwq.
3658         * If the user configured cpumask doesn't overlap with the
3659         * wq_unbound_cpumask, we fallback to the wq_unbound_cpumask.
3660         */
3661        copy_workqueue_attrs(new_attrs, attrs);
3662        cpumask_and(new_attrs->cpumask, new_attrs->cpumask, wq_unbound_cpumask);
3663        if (unlikely(cpumask_empty(new_attrs->cpumask)))
3664                cpumask_copy(new_attrs->cpumask, wq_unbound_cpumask);
3665
3666        /*
3667         * We may create multiple pwqs with differing cpumasks.  Make a
3668         * copy of @new_attrs which will be modified and used to obtain
3669         * pools.
3670         */
3671        copy_workqueue_attrs(tmp_attrs, new_attrs);
3672
3673        /*
3674         * If something goes wrong during CPU up/down, we'll fall back to
3675         * the default pwq covering whole @attrs->cpumask.  Always create
3676         * it even if we don't use it immediately.
3677         */
3678        ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
3679        if (!ctx->dfl_pwq)
3680                goto out_free;
3681
3682        for_each_node(node) {
3683                if (wq_calc_node_cpumask(new_attrs, node, -1, tmp_attrs->cpumask)) {
3684                        ctx->pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs);
3685                        if (!ctx->pwq_tbl[node])
3686                                goto out_free;
3687                } else {
3688                        ctx->dfl_pwq->refcnt++;
3689                        ctx->pwq_tbl[node] = ctx->dfl_pwq;
3690                }
3691        }
3692
3693        /* save the user configured attrs and sanitize it. */
3694        copy_workqueue_attrs(new_attrs, attrs);
3695        cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
3696        ctx->attrs = new_attrs;
3697
3698        ctx->wq = wq;
3699        free_workqueue_attrs(tmp_attrs);
3700        return ctx;
3701
3702out_free:
3703        free_workqueue_attrs(tmp_attrs);
3704        free_workqueue_attrs(new_attrs);
3705        apply_wqattrs_cleanup(ctx);
3706        return NULL;
3707}
3708
3709/* set attrs and install prepared pwqs, @ctx points to old pwqs on return */
3710static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
3711{
3712        int node;
3713
3714        /* all pwqs have been created successfully, let's install'em */
3715        mutex_lock(&ctx->wq->mutex);
3716
3717        copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);
3718
3719        /* save the previous pwq and install the new one */
3720        for_each_node(node)
3721                ctx->pwq_tbl[node] = numa_pwq_tbl_install(ctx->wq, node,
3722                                                          ctx->pwq_tbl[node]);
3723
3724        /* @dfl_pwq might not have been used, ensure it's linked */
3725        link_pwq(ctx->dfl_pwq);
3726        swap(ctx->wq->dfl_pwq, ctx->dfl_pwq);
3727
3728        mutex_unlock(&ctx->wq->mutex);
3729}
3730
3731static void apply_wqattrs_lock(void)
3732{
3733        /* CPUs should stay stable across pwq creations and installations */
3734        get_online_cpus();
3735        mutex_lock(&wq_pool_mutex);
3736}
3737
3738static void apply_wqattrs_unlock(void)
3739{
3740        mutex_unlock(&wq_pool_mutex);
3741        put_online_cpus();
3742}
3743
3744static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
3745                                        const struct workqueue_attrs *attrs)
3746{
3747        struct apply_wqattrs_ctx *ctx;
3748
3749        /* only unbound workqueues can change attributes */
3750        if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
3751                return -EINVAL;
3752
3753        /* creating multiple pwqs breaks ordering guarantee */
3754        if (!list_empty(&wq->pwqs)) {
3755                if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
3756                        return -EINVAL;
3757
3758                wq->flags &= ~__WQ_ORDERED;
3759        }
3760
3761        ctx = apply_wqattrs_prepare(wq, attrs);
3762        if (!ctx)
3763                return -ENOMEM;
3764
3765        /* the ctx has been prepared successfully, let's commit it */
3766        apply_wqattrs_commit(ctx);
3767        apply_wqattrs_cleanup(ctx);
3768
3769        return 0;
3770}
3771
3772/**
3773 * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
3774 * @wq: the target workqueue
3775 * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
3776 *
3777 * Apply @attrs to an unbound workqueue @wq.  Unless disabled, on NUMA
3778 * machines, this function maps a separate pwq to each NUMA node with
3779 * possibles CPUs in @attrs->cpumask so that work items are affine to the
3780 * NUMA node it was issued on.  Older pwqs are released as in-flight work
3781 * items finish.  Note that a work item which repeatedly requeues itself
3782 * back-to-back will stay on its current pwq.
3783 *
3784 * Performs GFP_KERNEL allocations.
3785 *
3786 * Return: 0 on success and -errno on failure.
3787 */
3788int apply_workqueue_attrs(struct workqueue_struct *wq,
3789                          const struct workqueue_attrs *attrs)
3790{
3791        int ret;
3792
3793        apply_wqattrs_lock();
3794        ret = apply_workqueue_attrs_locked(wq, attrs);
3795        apply_wqattrs_unlock();
3796
3797        return ret;
3798}
3799
3800/**
3801 * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
3802 * @wq: the target workqueue
3803 * @cpu: the CPU coming up or going down
3804 * @online: whether @cpu is coming up or going down
3805 *
3806 * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
3807 * %CPU_DOWN_FAILED.  @cpu is being hot[un]plugged, update NUMA affinity of
3808 * @wq accordingly.
3809 *
3810 * If NUMA affinity can't be adjusted due to memory allocation failure, it
3811 * falls back to @wq->dfl_pwq which may not be optimal but is always
3812 * correct.
3813 *
3814 * Note that when the last allowed CPU of a NUMA node goes offline for a
3815 * workqueue with a cpumask spanning multiple nodes, the workers which were
3816 * already executing the work items for the workqueue will lose their CPU
3817 * affinity and may execute on any CPU.  This is similar to how per-cpu
3818 * workqueues behave on CPU_DOWN.  If a workqueue user wants strict
3819 * affinity, it's the user's responsibility to flush the work item from
3820 * CPU_DOWN_PREPARE.
3821 */
3822static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
3823                                   bool online)
3824{
3825        int node = cpu_to_node(cpu);
3826        int cpu_off = online ? -1 : cpu;
3827        struct pool_workqueue *old_pwq = NULL, *pwq;
3828        struct workqueue_attrs *target_attrs;
3829        cpumask_t *cpumask;
3830
3831        lockdep_assert_held(&wq_pool_mutex);
3832
3833        if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND) ||
3834            wq->unbound_attrs->no_numa)
3835                return;
3836
3837        /*
3838         * We don't wanna alloc/free wq_attrs for each wq for each CPU.
3839         * Let's use a preallocated one.  The following buf is protected by
3840         * CPU hotplug exclusion.
3841         */
3842        target_attrs = wq_update_unbound_numa_attrs_buf;
3843        cpumask = target_attrs->cpumask;
3844
3845        copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
3846        pwq = unbound_pwq_by_node(wq, node);
3847
3848        /*
3849         * Let's determine what needs to be done.  If the target cpumask is
3850         * different from the default pwq's, we need to compare it to @pwq's
3851         * and create a new one if they don't match.  If the target cpumask
3852         * equals the default pwq's, the default pwq should be used.
3853         */
3854        if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) {
3855                if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask))
3856                        return;
3857        } else {
3858                goto use_dfl_pwq;
3859        }
3860
3861        /* create a new pwq */
3862        pwq = alloc_unbound_pwq(wq, target_attrs);
3863        if (!pwq) {
3864                pr_warn("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n",
3865                        wq->name);
3866                goto use_dfl_pwq;
3867        }
3868
3869        /* Install the new pwq. */
3870        mutex_lock(&wq->mutex);
3871        old_pwq = numa_pwq_tbl_install(wq, node, pwq);
3872        goto out_unlock;
3873
3874use_dfl_pwq:
3875        mutex_lock(&wq->mutex);
3876        spin_lock_irq(&wq->dfl_pwq->pool->lock);
3877        get_pwq(wq->dfl_pwq);
3878        spin_unlock_irq(&wq->dfl_pwq->pool->lock);
3879        old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
3880out_unlock:
3881        mutex_unlock(&wq->mutex);
3882        put_pwq_unlocked(old_pwq);
3883}
3884
3885static int alloc_and_link_pwqs(struct workqueue_struct *wq)
3886{
3887        bool highpri = wq->flags & WQ_HIGHPRI;
3888        int cpu, ret;
3889
3890        if (!(wq->flags & WQ_UNBOUND)) {
3891                wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
3892                if (!wq->cpu_pwqs)
3893                        return -ENOMEM;
3894
3895                for_each_possible_cpu(cpu) {
3896                        struct pool_workqueue *pwq =
3897                                per_cpu_ptr(wq->cpu_pwqs, cpu);
3898                        struct worker_pool *cpu_pools =
3899                                per_cpu(cpu_worker_pools, cpu);
3900
3901                        init_pwq(pwq, wq, &cpu_pools[highpri]);
3902
3903                        mutex_lock(&wq->mutex);
3904                        link_pwq(pwq);
3905                        mutex_unlock(&wq->mutex);
3906                }
3907                return 0;
3908        } else if (wq->flags & __WQ_ORDERED) {
3909                ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
3910                /* there should only be single pwq for ordering guarantee */
3911                WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
3912                              wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
3913                     "ordering guarantee broken for workqueue %s\n", wq->name);
3914                return ret;
3915        } else {
3916                return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
3917        }
3918}
3919
3920static int wq_clamp_max_active(int max_active, unsigned int flags,
3921                               const char *name)
3922{
3923        int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
3924
3925        if (max_active < 1 || max_active > lim)
3926                pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
3927                        max_active, name, 1, lim);
3928
3929        return clamp_val(max_active, 1, lim);
3930}
3931
3932struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
3933                                               unsigned int flags,
3934                                               int max_active,
3935                                               struct lock_class_key *key,
3936                                               const char *lock_name, ...)
3937{
3938        size_t tbl_size = 0;
3939        va_list args;
3940        struct workqueue_struct *wq;
3941        struct pool_workqueue *pwq;
3942
3943        /*
3944         * Unbound && max_active == 1 used to imply ordered, which is no
3945         * longer the case on NUMA machines due to per-node pools.  While
3946         * alloc_ordered_workqueue() is the right way to create an ordered
3947         * workqueue, keep the previous behavior to avoid subtle breakages
3948         * on NUMA.
3949         */
3950        if ((flags & WQ_UNBOUND) && max_active == 1)
3951                flags |= __WQ_ORDERED;
3952
3953        /* see the comment above the definition of WQ_POWER_EFFICIENT */
3954        if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
3955                flags |= WQ_UNBOUND;
3956
3957        /* allocate wq and format name */
3958        if (flags & WQ_UNBOUND)
3959                tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]);
3960
3961        wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL);
3962        if (!wq)
3963                return NULL;
3964
3965        if (flags & WQ_UNBOUND) {
3966                wq->unbound_attrs = alloc_workqueue_attrs(GFP_KERNEL);
3967                if (!wq->unbound_attrs)
3968                        goto err_free_wq;
3969        }
3970
3971        va_start(args, lock_name);
3972        vsnprintf(wq->name, sizeof(wq->name), fmt, args);
3973        va_end(args);
3974
3975        max_active = max_active ?: WQ_DFL_ACTIVE;
3976        max_active = wq_clamp_max_active(max_active, flags, wq->name);
3977
3978        /* init wq */
3979        wq->flags = flags;
3980        wq->saved_max_active = max_active;
3981        mutex_init(&wq->mutex);
3982        atomic_set(&wq->nr_pwqs_to_flush, 0);
3983        INIT_LIST_HEAD(&wq->pwqs);
3984        INIT_LIST_HEAD(&wq->flusher_queue);
3985        INIT_LIST_HEAD(&wq->flusher_overflow);
3986        INIT_LIST_HEAD(&wq->maydays);
3987
3988        lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
3989        INIT_LIST_HEAD(&wq->list);
3990
3991        if (alloc_and_link_pwqs(wq) < 0)
3992                goto err_free_wq;
3993
3994        /*
3995         * Workqueues which may be used during memory reclaim should
3996         * have a rescuer to guarantee forward progress.
3997         */
3998        if (flags & WQ_MEM_RECLAIM) {
3999                struct worker *rescuer;
4000
4001                rescuer = alloc_worker(NUMA_NO_NODE);
4002                if (!rescuer)
4003                        goto err_destroy;
4004
4005                rescuer->rescue_wq = wq;
4006                rescuer->task = kthread_create(rescuer_thread, rescuer, "%s",
4007                                               wq->name);
4008                if (IS_ERR(rescuer->task)) {
4009                        kfree(rescuer);
4010                        goto err_destroy;
4011                }
4012
4013                wq->rescuer = rescuer;
4014                kthread_bind_mask(rescuer->task, cpu_possible_mask);
4015                wake_up_process(rescuer->task);
4016        }
4017
4018        if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
4019                goto err_destroy;
4020
4021        /*
4022         * wq_pool_mutex protects global freeze state and workqueues list.
4023         * Grab it, adjust max_active and add the new @wq to workqueues
4024         * list.
4025         */
4026        mutex_lock(&wq_pool_mutex);
4027
4028        mutex_lock(&wq->mutex);
4029        for_each_pwq(pwq, wq)
4030                pwq_adjust_max_active(pwq);
4031        mutex_unlock(&wq->mutex);
4032
4033        list_add_tail_rcu(&wq->list, &workqueues);
4034
4035        mutex_unlock(&wq_pool_mutex);
4036
4037        return wq;
4038
4039err_free_wq:
4040        free_workqueue_attrs(wq->unbound_attrs);
4041        kfree(wq);
4042        return NULL;
4043err_destroy:
4044        destroy_workqueue(wq);
4045        return NULL;
4046}
4047EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
4048
4049/**
4050 * destroy_workqueue - safely terminate a workqueue
4051 * @wq: target workqueue
4052 *
4053 * Safely destroy a workqueue. All work currently pending will be done first.
4054 */
4055void destroy_workqueue(struct workqueue_struct *wq)
4056{
4057        struct pool_workqueue *pwq;
4058        int node;
4059
4060        /* drain it before proceeding with destruction */
4061        drain_workqueue(wq);
4062
4063        /* sanity checks */
4064        mutex_lock(&wq->mutex);
4065        for_each_pwq(pwq, wq) {
4066                int i;
4067
4068                for (i = 0; i < WORK_NR_COLORS; i++) {
4069                        if (WARN_ON(pwq->nr_in_flight[i])) {
4070                                mutex_unlock(&wq->mutex);
4071                                show_workqueue_state();
4072                                return;
4073                        }
4074                }
4075
4076                if (WARN_ON((pwq != wq->dfl_pwq) && (pwq->refcnt > 1)) ||
4077                    WARN_ON(pwq->nr_active) ||
4078                    WARN_ON(!list_empty(&pwq->delayed_works))) {
4079                        mutex_unlock(&wq->mutex);
4080                        show_workqueue_state();
4081                        return;
4082                }
4083        }
4084        mutex_unlock(&wq->mutex);
4085
4086        /*
4087         * wq list is used to freeze wq, remove from list after
4088         * flushing is complete in case freeze races us.
4089         */
4090        mutex_lock(&wq_pool_mutex);
4091        list_del_rcu(&wq->list);
4092        mutex_unlock(&wq_pool_mutex);
4093
4094        workqueue_sysfs_unregister(wq);
4095
4096        if (wq->rescuer)
4097                kthread_stop(wq->rescuer->task);
4098
4099        if (!(wq->flags & WQ_UNBOUND)) {
4100                /*
4101                 * The base ref is never dropped on per-cpu pwqs.  Directly
4102                 * schedule RCU free.
4103                 */
4104                call_rcu_sched(&wq->rcu, rcu_free_wq);
4105        } else {
4106                /*
4107                 * We're the sole accessor of @wq at this point.  Directly
4108                 * access numa_pwq_tbl[] and dfl_pwq to put the base refs.
4109                 * @wq will be freed when the last pwq is released.
4110                 */
4111                for_each_node(node) {
4112                        pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
4113                        RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL);
4114                        put_pwq_unlocked(pwq);
4115                }
4116
4117                /*
4118                 * Put dfl_pwq.  @wq may be freed any time after dfl_pwq is
4119                 * put.  Don't access it afterwards.
4120                 */
4121                pwq = wq->dfl_pwq;
4122                wq->dfl_pwq = NULL;
4123                put_pwq_unlocked(pwq);
4124        }
4125}
4126EXPORT_SYMBOL_GPL(destroy_workqueue);
4127
4128/**
4129 * workqueue_set_max_active - adjust max_active of a workqueue
4130 * @wq: target workqueue
4131 * @max_active: new max_active value.
4132 *
4133 * Set max_active of @wq to @max_active.
4134 *
4135 * CONTEXT:
4136 * Don't call from IRQ context.
4137 */
4138void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
4139{
4140        struct pool_workqueue *pwq;
4141
4142        /* disallow meddling with max_active for ordered workqueues */
4143        if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
4144                return;
4145
4146        max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
4147
4148        mutex_lock(&wq->mutex);
4149
4150        wq->flags &= ~__WQ_ORDERED;
4151        wq->saved_max_active = max_active;
4152
4153        for_each_pwq(pwq, wq)
4154                pwq_adjust_max_active(pwq);
4155
4156        mutex_unlock(&wq->mutex);
4157}
4158EXPORT_SYMBOL_GPL(workqueue_set_max_active);
4159
4160/**
4161 * current_is_workqueue_rescuer - is %current workqueue rescuer?
4162 *
4163 * Determine whether %current is a workqueue rescuer.  Can be used from
4164 * work functions to determine whether it's being run off the rescuer task.
4165 *
4166 * Return: %true if %current is a workqueue rescuer. %false otherwise.
4167 */
4168bool current_is_workqueue_rescuer(void)
4169{
4170        struct worker *worker = current_wq_worker();
4171
4172        return worker && worker->rescue_wq;
4173}
4174
4175/**
4176 * workqueue_congested - test whether a workqueue is congested
4177 * @cpu: CPU in question
4178 * @wq: target workqueue
4179 *
4180 * Test whether @wq's cpu workqueue for @cpu is congested.  There is
4181 * no synchronization around this function and the test result is
4182 * unreliable and only useful as advisory hints or for debugging.
4183 *
4184 * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
4185 * Note that both per-cpu and unbound workqueues may be associated with
4186 * multiple pool_workqueues which have separate congested states.  A
4187 * workqueue being congested on one CPU doesn't mean the workqueue is also
4188 * contested on other CPUs / NUMA nodes.
4189 *
4190 * Return:
4191 * %true if congested, %false otherwise.
4192 */
4193bool workqueue_congested(int cpu, struct workqueue_struct *wq)
4194{
4195        struct pool_workqueue *pwq;
4196        bool ret;
4197
4198        rcu_read_lock_sched();
4199
4200        if (cpu == WORK_CPU_UNBOUND)
4201                cpu = smp_processor_id();
4202
4203        if (!(wq->flags & WQ_UNBOUND))
4204                pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
4205        else
4206                pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
4207
4208        ret = !list_empty(&pwq->delayed_works);
4209        rcu_read_unlock_sched();
4210
4211        return ret;
4212}
4213EXPORT_SYMBOL_GPL(workqueue_congested);
4214
4215/**
4216 * work_busy - test whether a work is currently pending or running
4217 * @work: the work to be tested
4218 *
4219 * Test whether @work is currently pending or running.  There is no
4220 * synchronization around this function and the test result is
4221 * unreliable and only useful as advisory hints or for debugging.
4222 *
4223 * Return:
4224 * OR'd bitmask of WORK_BUSY_* bits.
4225 */
4226unsigned int work_busy(struct work_struct *work)
4227{
4228        struct worker_pool *pool;
4229        unsigned long flags;
4230        unsigned int ret = 0;
4231
4232        if (work_pending(work))
4233                ret |= WORK_BUSY_PENDING;
4234
4235        local_irq_save(flags);
4236        pool = get_work_pool(work);
4237        if (pool) {
4238                spin_lock(&pool->lock);
4239                if (find_worker_executing_work(pool, work))
4240                        ret |= WORK_BUSY_RUNNING;
4241                spin_unlock(&pool->lock);
4242        }
4243        local_irq_restore(flags);
4244
4245        return ret;
4246}
4247EXPORT_SYMBOL_GPL(work_busy);
4248
4249/**
4250 * set_worker_desc - set description for the current work item
4251 * @fmt: printf-style format string
4252 * @...: arguments for the format string
4253 *
4254 * This function can be called by a running work function to describe what
4255 * the work item is about.  If the worker task gets dumped, this
4256 * information will be printed out together to help debugging.  The
4257 * description can be at most WORKER_DESC_LEN including the trailing '\0'.
4258 */
4259void set_worker_desc(const char *fmt, ...)
4260{
4261        struct worker *worker = current_wq_worker();
4262        va_list args;
4263
4264        if (worker) {
4265                va_start(args, fmt);
4266                vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
4267                va_end(args);
4268                worker->desc_valid = true;
4269        }
4270}
4271
4272/**
4273 * print_worker_info - print out worker information and description
4274 * @log_lvl: the log level to use when printing
4275 * @task: target task
4276 *
4277 * If @task is a worker and currently executing a work item, print out the
4278 * name of the workqueue being serviced and worker description set with
4279 * set_worker_desc() by the currently executing work item.
4280 *
4281 * This function can be safely called on any task as long as the
4282 * task_struct itself is accessible.  While safe, this function isn't
4283 * synchronized and may print out mixups or garbages of limited length.
4284 */
4285void print_worker_info(const char *log_lvl, struct task_struct *task)
4286{
4287        work_func_t *fn = NULL;
4288        char name[WQ_NAME_LEN] = { };
4289        char desc[WORKER_DESC_LEN] = { };
4290        struct pool_workqueue *pwq = NULL;
4291        struct workqueue_struct *wq = NULL;
4292        bool desc_valid = false;
4293        struct worker *worker;
4294
4295        if (!(task->flags & PF_WQ_WORKER))
4296                return;
4297
4298        /*
4299         * This function is called without any synchronization and @task
4300         * could be in any state.  Be careful with dereferences.
4301         */
4302        worker = kthread_probe_data(task);
4303
4304        /*
4305         * Carefully copy the associated workqueue's workfn and name.  Keep
4306         * the original last '\0' in case the original contains garbage.
4307         */
4308        probe_kernel_read(&fn, &worker->current_func, sizeof(fn));
4309        probe_kernel_read(&pwq, &worker->current_pwq, sizeof(pwq));
4310        probe_kernel_read(&wq, &pwq->wq, sizeof(wq));
4311        probe_kernel_read(name, wq->name, sizeof(name) - 1);
4312
4313        /* copy worker description */
4314        probe_kernel_read(&desc_valid, &worker->desc_valid, sizeof(desc_valid));
4315        if (desc_valid)
4316                probe_kernel_read(desc, worker->desc, sizeof(desc) - 1);
4317
4318        if (fn || name[0] || desc[0]) {
4319                printk("%sWorkqueue: %s %pf", log_lvl, name, fn);
4320                if (desc[0])
4321                        pr_cont(" (%s)", desc);
4322                pr_cont("\n");
4323        }
4324}
4325
4326static void pr_cont_pool_info(struct worker_pool *pool)
4327{
4328        pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask);
4329        if (pool->node != NUMA_NO_NODE)
4330                pr_cont(" node=%d", pool->node);
4331        pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice);
4332}
4333
4334static void pr_cont_work(bool comma, struct work_struct *work)
4335{
4336        if (work->func == wq_barrier_func) {
4337                struct wq_barrier *barr;
4338
4339                barr = container_of(work, struct wq_barrier, work);
4340
4341                pr_cont("%s BAR(%d)", comma ? "," : "",
4342                        task_pid_nr(barr->task));
4343        } else {
4344                pr_cont("%s %pf", comma ? "," : "", work->func);
4345        }
4346}
4347
4348static void show_pwq(struct pool_workqueue *pwq)
4349{
4350        struct worker_pool *pool = pwq->pool;
4351        struct work_struct *work;
4352        struct worker *worker;
4353        bool has_in_flight = false, has_pending = false;
4354        int bkt;
4355
4356        pr_info("  pwq %d:", pool->id);
4357        pr_cont_pool_info(pool);
4358
4359        pr_cont(" active=%d/%d%s\n", pwq->nr_active, pwq->max_active,
4360                !list_empty(&pwq->mayday_node) ? " MAYDAY" : "");
4361
4362        hash_for_each(pool->busy_hash, bkt, worker, hentry) {
4363                if (worker->current_pwq == pwq) {
4364                        has_in_flight = true;
4365                        break;
4366                }
4367        }
4368        if (has_in_flight) {
4369                bool comma = false;
4370
4371                pr_info("    in-flight:");
4372                hash_for_each(pool->busy_hash, bkt, worker, hentry) {
4373                        if (worker->current_pwq != pwq)
4374                                continue;
4375
4376                        pr_cont("%s %d%s:%pf", comma ? "," : "",
4377                                task_pid_nr(worker->task),
4378                                worker == pwq->wq->rescuer ? "(RESCUER)" : "",
4379                                worker->current_func);
4380                        list_for_each_entry(work, &worker->scheduled, entry)
4381                                pr_cont_work(false, work);
4382                        comma = true;
4383                }
4384                pr_cont("\n");
4385        }
4386
4387        list_for_each_entry(work, &pool->worklist, entry) {
4388                if (get_work_pwq(work) == pwq) {
4389                        has_pending = true;
4390                        break;
4391                }
4392        }
4393        if (has_pending) {
4394                bool comma = false;
4395
4396                pr_info("    pending:");
4397                list_for_each_entry(work, &pool->worklist, entry) {
4398                        if (get_work_pwq(work) != pwq)
4399                                continue;
4400
4401                        pr_cont_work(comma, work);
4402                        comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
4403                }
4404                pr_cont("\n");
4405        }
4406
4407        if (!list_empty(&pwq->delayed_works)) {
4408                bool comma = false;
4409
4410                pr_info("    delayed:");
4411                list_for_each_entry(work, &pwq->delayed_works, entry) {
4412                        pr_cont_work(comma, work);
4413                        comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
4414                }
4415                pr_cont("\n");
4416        }
4417}
4418
4419/**
4420 * show_workqueue_state - dump workqueue state
4421 *
4422 * Called from a sysrq handler or try_to_freeze_tasks() and prints out
4423 * all busy workqueues and pools.
4424 */
4425void show_workqueue_state(void)
4426{
4427        struct workqueue_struct *wq;
4428        struct worker_pool *pool;
4429        unsigned long flags;
4430        int pi;
4431
4432        rcu_read_lock_sched();
4433
4434        pr_info("Showing busy workqueues and worker pools:\n");
4435
4436        list_for_each_entry_rcu(wq, &workqueues, list) {
4437                struct pool_workqueue *pwq;
4438                bool idle = true;
4439
4440                for_each_pwq(pwq, wq) {
4441                        if (pwq->nr_active || !list_empty(&pwq->delayed_works)) {
4442                                idle = false;
4443                                break;
4444                        }
4445                }
4446                if (idle)
4447                        continue;
4448
4449                pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
4450
4451                for_each_pwq(pwq, wq) {
4452                        spin_lock_irqsave(&pwq->pool->lock, flags);
4453                        if (pwq->nr_active || !list_empty(&pwq->delayed_works))
4454                                show_pwq(pwq);
4455                        spin_unlock_irqrestore(&pwq->pool->lock, flags);
4456                }
4457        }
4458
4459        for_each_pool(pool, pi) {
4460                struct worker *worker;
4461                bool first = true;
4462
4463                spin_lock_irqsave(&pool->lock, flags);
4464                if (pool->nr_workers == pool->nr_idle)
4465                        goto next_pool;
4466
4467                pr_info("pool %d:", pool->id);
4468                pr_cont_pool_info(pool);
4469                pr_cont(" hung=%us workers=%d",
4470                        jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
4471                        pool->nr_workers);
4472                if (pool->manager)
4473                        pr_cont(" manager: %d",
4474                                task_pid_nr(pool->manager->task));
4475                list_for_each_entry(worker, &pool->idle_list, entry) {
4476                        pr_cont(" %s%d", first ? "idle: " : "",
4477                                task_pid_nr(worker->task));
4478                        first = false;
4479                }
4480                pr_cont("\n");
4481        next_pool:
4482                spin_unlock_irqrestore(&pool->lock, flags);
4483        }
4484
4485        rcu_read_unlock_sched();
4486}
4487
4488/*
4489 * CPU hotplug.
4490 *
4491 * There are two challenges in supporting CPU hotplug.  Firstly, there
4492 * are a lot of assumptions on strong associations among work, pwq and
4493 * pool which make migrating pending and scheduled works very
4494 * difficult to implement without impacting hot paths.  Secondly,
4495 * worker pools serve mix of short, long and very long running works making
4496 * blocked draining impractical.
4497 *
4498 * This is solved by allowing the pools to be disassociated from the CPU
4499 * running as an unbound one and allowing it to be reattached later if the
4500 * cpu comes back online.
4501 */
4502
4503static void wq_unbind_fn(struct work_struct *work)
4504{
4505        int cpu = smp_processor_id();
4506        struct worker_pool *pool;
4507        struct worker *worker;
4508
4509        for_each_cpu_worker_pool(pool, cpu) {
4510                mutex_lock(&pool->attach_mutex);
4511                spin_lock_irq(&pool->lock);
4512
4513                /*
4514                 * We've blocked all attach/detach operations. Make all workers
4515                 * unbound and set DISASSOCIATED.  Before this, all workers
4516                 * except for the ones which are still executing works from
4517                 * before the last CPU down must be on the cpu.  After
4518                 * this, they may become diasporas.
4519                 */
4520                for_each_pool_worker(worker, pool)
4521                        worker->flags |= WORKER_UNBOUND;
4522
4523                pool->flags |= POOL_DISASSOCIATED;
4524
4525                spin_unlock_irq(&pool->lock);
4526                mutex_unlock(&pool->attach_mutex);
4527
4528                /*
4529                 * Call schedule() so that we cross rq->lock and thus can
4530                 * guarantee sched callbacks see the %WORKER_UNBOUND flag.
4531                 * This is necessary as scheduler callbacks may be invoked
4532                 * from other cpus.
4533                 */
4534                schedule();
4535
4536                /*
4537                 * Sched callbacks are disabled now.  Zap nr_running.
4538                 * After this, nr_running stays zero and need_more_worker()
4539                 * and keep_working() are always true as long as the
4540                 * worklist is not empty.  This pool now behaves as an
4541                 * unbound (in terms of concurrency management) pool which
4542                 * are served by workers tied to the pool.
4543                 */
4544                atomic_set(&pool->nr_running, 0);
4545
4546                /*
4547                 * With concurrency management just turned off, a busy
4548                 * worker blocking could lead to lengthy stalls.  Kick off
4549                 * unbound chain execution of currently pending work items.
4550                 */
4551                spin_lock_irq(&pool->lock);
4552                wake_up_worker(pool);
4553                spin_unlock_irq(&pool->lock);
4554        }
4555}
4556
4557/**
4558 * rebind_workers - rebind all workers of a pool to the associated CPU
4559 * @pool: pool of interest
4560 *
4561 * @pool->cpu is coming online.  Rebind all workers to the CPU.
4562 */
4563static void rebind_workers(struct worker_pool *pool)
4564{
4565        struct worker *worker;
4566
4567        lockdep_assert_held(&pool->attach_mutex);
4568
4569        /*
4570         * Restore CPU affinity of all workers.  As all idle workers should
4571         * be on the run-queue of the associated CPU before any local
4572         * wake-ups for concurrency management happen, restore CPU affinity
4573         * of all workers first and then clear UNBOUND.  As we're called
4574         * from CPU_ONLINE, the following shouldn't fail.
4575         */
4576        for_each_pool_worker(worker, pool)
4577                WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
4578                                                  pool->attrs->cpumask) < 0);
4579
4580        spin_lock_irq(&pool->lock);
4581
4582        /*
4583         * XXX: CPU hotplug notifiers are weird and can call DOWN_FAILED
4584         * w/o preceding DOWN_PREPARE.  Work around it.  CPU hotplug is
4585         * being reworked and this can go away in time.
4586         */
4587        if (!(pool->flags & POOL_DISASSOCIATED)) {
4588                spin_unlock_irq(&pool->lock);
4589                return;
4590        }
4591
4592        pool->flags &= ~POOL_DISASSOCIATED;
4593
4594        for_each_pool_worker(worker, pool) {
4595                unsigned int worker_flags = worker->flags;
4596
4597                /*
4598                 * A bound idle worker should actually be on the runqueue
4599                 * of the associated CPU for local wake-ups targeting it to
4600                 * work.  Kick all idle workers so that they migrate to the
4601                 * associated CPU.  Doing this in the same loop as
4602                 * replacing UNBOUND with REBOUND is safe as no worker will
4603                 * be bound before @pool->lock is released.
4604                 */
4605                if (worker_flags & WORKER_IDLE)
4606                        wake_up_process(worker->task);
4607
4608                /*
4609                 * We want to clear UNBOUND but can't directly call
4610                 * worker_clr_flags() or adjust nr_running.  Atomically
4611                 * replace UNBOUND with another NOT_RUNNING flag REBOUND.
4612                 * @worker will clear REBOUND using worker_clr_flags() when
4613                 * it initiates the next execution cycle thus restoring
4614                 * concurrency management.  Note that when or whether
4615                 * @worker clears REBOUND doesn't affect correctness.
4616                 *
4617                 * ACCESS_ONCE() is necessary because @worker->flags may be
4618                 * tested without holding any lock in
4619                 * wq_worker_waking_up().  Without it, NOT_RUNNING test may
4620                 * fail incorrectly leading to premature concurrency
4621                 * management operations.
4622                 */
4623                WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
4624                worker_flags |= WORKER_REBOUND;
4625                worker_flags &= ~WORKER_UNBOUND;
4626                ACCESS_ONCE(worker->flags) = worker_flags;
4627        }
4628
4629        spin_unlock_irq(&pool->lock);
4630}
4631
4632/**
4633 * restore_unbound_workers_cpumask - restore cpumask of unbound workers
4634 * @pool: unbound pool of interest
4635 * @cpu: the CPU which is coming up
4636 *
4637 * An unbound pool may end up with a cpumask which doesn't have any online
4638 * CPUs.  When a worker of such pool get scheduled, the scheduler resets
4639 * its cpus_allowed.  If @cpu is in @pool's cpumask which didn't have any
4640 * online CPU before, cpus_allowed of all its workers should be restored.
4641 */
4642static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
4643{
4644        static cpumask_t cpumask;
4645        struct worker *worker;
4646
4647        lockdep_assert_held(&pool->attach_mutex);
4648
4649        /* is @cpu allowed for @pool? */
4650        if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
4651                return;
4652
4653        cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
4654
4655        /* as we're called from CPU_ONLINE, the following shouldn't fail */
4656        for_each_pool_worker(worker, pool)
4657                WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
4658}
4659
4660int workqueue_prepare_cpu(unsigned int cpu)
4661{
4662        struct worker_pool *pool;
4663
4664        for_each_cpu_worker_pool(pool, cpu) {
4665                if (pool->nr_workers)
4666                        continue;
4667                if (!create_worker(pool))
4668                        return -ENOMEM;
4669        }
4670        return 0;
4671}
4672
4673int workqueue_online_cpu(unsigned int cpu)
4674{
4675        struct worker_pool *pool;
4676        struct workqueue_struct *wq;
4677        int pi;
4678
4679        mutex_lock(&wq_pool_mutex);
4680
4681        for_each_pool(pool, pi) {
4682                mutex_lock(&pool->attach_mutex);
4683
4684                if (pool->cpu == cpu)
4685                        rebind_workers(pool);
4686                else if (pool->cpu < 0)
4687                        restore_unbound_workers_cpumask(pool, cpu);
4688
4689                mutex_unlock(&pool->attach_mutex);
4690        }
4691
4692        /* update NUMA affinity of unbound workqueues */
4693        list_for_each_entry(wq, &workqueues, list)
4694                wq_update_unbound_numa(wq, cpu, true);
4695
4696        mutex_unlock(&wq_pool_mutex);
4697        return 0;
4698}
4699
4700int workqueue_offline_cpu(unsigned int cpu)
4701{
4702        struct work_struct unbind_work;
4703        struct workqueue_struct *wq;
4704
4705        /* unbinding per-cpu workers should happen on the local CPU */
4706        INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
4707        queue_work_on(cpu, system_highpri_wq, &unbind_work);
4708
4709        /* update NUMA affinity of unbound workqueues */
4710        mutex_lock(&wq_pool_mutex);
4711        list_for_each_entry(wq, &workqueues, list)
4712                wq_update_unbound_numa(wq, cpu, false);
4713        mutex_unlock(&wq_pool_mutex);
4714
4715        /* wait for per-cpu unbinding to finish */
4716        flush_work(&unbind_work);
4717        destroy_work_on_stack(&unbind_work);
4718        return 0;
4719}
4720
4721#ifdef CONFIG_SMP
4722
4723struct work_for_cpu {
4724        struct work_struct work;
4725        long (*fn)(void *);
4726        void *arg;
4727        long ret;
4728};
4729
4730static void work_for_cpu_fn(struct work_struct *work)
4731{
4732        struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
4733
4734        wfc->ret = wfc->fn(wfc->arg);
4735}
4736
4737/**
4738 * work_on_cpu - run a function in thread context on a particular cpu
4739 * @cpu: the cpu to run on
4740 * @fn: the function to run
4741 * @arg: the function arg
4742 *
4743 * It is up to the caller to ensure that the cpu doesn't go offline.
4744 * The caller must not hold any locks which would prevent @fn from completing.
4745 *
4746 * Return: The value @fn returns.
4747 */
4748long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
4749{
4750        struct work_for_cpu wfc = { .fn = fn, .arg = arg };
4751
4752        INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
4753        schedule_work_on(cpu, &wfc.work);
4754        flush_work(&wfc.work);
4755        destroy_work_on_stack(&wfc.work);
4756        return wfc.ret;
4757}
4758EXPORT_SYMBOL_GPL(work_on_cpu);
4759
4760/**
4761 * work_on_cpu_safe - run a function in thread context on a particular cpu
4762 * @cpu: the cpu to run on
4763 * @fn:  the function to run
4764 * @arg: the function argument
4765 *
4766 * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold
4767 * any locks which would prevent @fn from completing.
4768 *
4769 * Return: The value @fn returns.
4770 */
4771long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
4772{
4773        long ret = -ENODEV;
4774
4775        get_online_cpus();
4776        if (cpu_online(cpu))
4777                ret = work_on_cpu(cpu, fn, arg);
4778        put_online_cpus();
4779        return ret;
4780}
4781EXPORT_SYMBOL_GPL(work_on_cpu_safe);
4782#endif /* CONFIG_SMP */
4783
4784#ifdef CONFIG_FREEZER
4785
4786/**
4787 * freeze_workqueues_begin - begin freezing workqueues
4788 *
4789 * Start freezing workqueues.  After this function returns, all freezable
4790 * workqueues will queue new works to their delayed_works list instead of
4791 * pool->worklist.
4792 *
4793 * CONTEXT:
4794 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
4795 */
4796void freeze_workqueues_begin(void)
4797{
4798        struct workqueue_struct *wq;
4799        struct pool_workqueue *pwq;
4800
4801        mutex_lock(&wq_pool_mutex);
4802
4803        WARN_ON_ONCE(workqueue_freezing);
4804        workqueue_freezing = true;
4805
4806        list_for_each_entry(wq, &workqueues, list) {
4807                mutex_lock(&wq->mutex);
4808                for_each_pwq(pwq, wq)
4809                        pwq_adjust_max_active(pwq);
4810                mutex_unlock(&wq->mutex);
4811        }
4812
4813        mutex_unlock(&wq_pool_mutex);
4814}
4815
4816/**
4817 * freeze_workqueues_busy - are freezable workqueues still busy?
4818 *
4819 * Check whether freezing is complete.  This function must be called
4820 * between freeze_workqueues_begin() and thaw_workqueues().
4821 *
4822 * CONTEXT:
4823 * Grabs and releases wq_pool_mutex.
4824 *
4825 * Return:
4826 * %true if some freezable workqueues are still busy.  %false if freezing
4827 * is complete.
4828 */
4829bool freeze_workqueues_busy(void)
4830{
4831        bool busy = false;
4832        struct workqueue_struct *wq;
4833        struct pool_workqueue *pwq;
4834
4835        mutex_lock(&wq_pool_mutex);
4836
4837        WARN_ON_ONCE(!workqueue_freezing);
4838
4839        list_for_each_entry(wq, &workqueues, list) {
4840                if (!(wq->flags & WQ_FREEZABLE))
4841                        continue;
4842                /*
4843                 * nr_active is monotonically decreasing.  It's safe
4844                 * to peek without lock.
4845                 */
4846                rcu_read_lock_sched();
4847                for_each_pwq(pwq, wq) {
4848                        WARN_ON_ONCE(pwq->nr_active < 0);
4849                        if (pwq->nr_active) {
4850                                busy = true;
4851                                rcu_read_unlock_sched();
4852                                goto out_unlock;
4853                        }
4854                }
4855                rcu_read_unlock_sched();
4856        }
4857out_unlock:
4858        mutex_unlock(&wq_pool_mutex);
4859        return busy;
4860}
4861
4862/**
4863 * thaw_workqueues - thaw workqueues
4864 *
4865 * Thaw workqueues.  Normal queueing is restored and all collected
4866 * frozen works are transferred to their respective pool worklists.
4867 *
4868 * CONTEXT:
4869 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
4870 */
4871void thaw_workqueues(void)
4872{
4873        struct workqueue_struct *wq;
4874        struct pool_workqueue *pwq;
4875
4876        mutex_lock(&wq_pool_mutex);
4877
4878        if (!workqueue_freezing)
4879                goto out_unlock;
4880
4881        workqueue_freezing = false;
4882
4883        /* restore max_active and repopulate worklist */
4884        list_for_each_entry(wq, &workqueues, list) {
4885                mutex_lock(&wq->mutex);
4886                for_each_pwq(pwq, wq)
4887                        pwq_adjust_max_active(pwq);
4888                mutex_unlock(&wq->mutex);
4889        }
4890
4891out_unlock:
4892        mutex_unlock(&wq_pool_mutex);
4893}
4894#endif /* CONFIG_FREEZER */
4895
4896static int workqueue_apply_unbound_cpumask(void)
4897{
4898        LIST_HEAD(ctxs);
4899        int ret = 0;
4900        struct workqueue_struct *wq;
4901        struct apply_wqattrs_ctx *ctx, *n;
4902
4903        lockdep_assert_held(&wq_pool_mutex);
4904
4905        list_for_each_entry(wq, &workqueues, list) {
4906                if (!(wq->flags & WQ_UNBOUND))
4907                        continue;
4908                /* creating multiple pwqs breaks ordering guarantee */
4909                if (wq->flags & __WQ_ORDERED)
4910                        continue;
4911
4912                ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs);
4913                if (!ctx) {
4914                        ret = -ENOMEM;
4915                        break;
4916                }
4917
4918                list_add_tail(&ctx->list, &ctxs);
4919        }
4920
4921        list_for_each_entry_safe(ctx, n, &ctxs, list) {
4922                if (!ret)
4923                        apply_wqattrs_commit(ctx);
4924                apply_wqattrs_cleanup(ctx);
4925        }
4926
4927        return ret;
4928}
4929
4930/**
4931 *  workqueue_set_unbound_cpumask - Set the low-level unbound cpumask
4932 *  @cpumask: the cpumask to set
4933 *
4934 *  The low-level workqueues cpumask is a global cpumask that limits
4935 *  the affinity of all unbound workqueues.  This function check the @cpumask
4936 *  and apply it to all unbound workqueues and updates all pwqs of them.
4937 *
4938 *  Retun:      0       - Success
4939 *              -EINVAL - Invalid @cpumask
4940 *              -ENOMEM - Failed to allocate memory for attrs or pwqs.
4941 */
4942int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
4943{
4944        int ret = -EINVAL;
4945        cpumask_var_t saved_cpumask;
4946
4947        if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL))
4948                return -ENOMEM;
4949
4950        cpumask_and(cpumask, cpumask, cpu_possible_mask);
4951        if (!cpumask_empty(cpumask)) {
4952                apply_wqattrs_lock();
4953
4954                /* save the old wq_unbound_cpumask. */
4955                cpumask_copy(saved_cpumask, wq_unbound_cpumask);
4956
4957                /* update wq_unbound_cpumask at first and apply it to wqs. */
4958                cpumask_copy(wq_unbound_cpumask, cpumask);
4959                ret = workqueue_apply_unbound_cpumask();
4960
4961                /* restore the wq_unbound_cpumask when failed. */
4962                if (ret < 0)
4963                        cpumask_copy(wq_unbound_cpumask, saved_cpumask);
4964
4965                apply_wqattrs_unlock();
4966        }
4967
4968        free_cpumask_var(saved_cpumask);
4969        return ret;
4970}
4971
4972#ifdef CONFIG_SYSFS
4973/*
4974 * Workqueues with WQ_SYSFS flag set is visible to userland via
4975 * /sys/bus/workqueue/devices/WQ_NAME.  All visible workqueues have the
4976 * following attributes.
4977 *
4978 *  per_cpu     RO bool : whether the workqueue is per-cpu or unbound
4979 *  max_active  RW int  : maximum number of in-flight work items
4980 *
4981 * Unbound workqueues have the following extra attributes.
4982 *
4983 *  id          RO int  : the associated pool ID
4984 *  nice        RW int  : nice value of the workers
4985 *  cpumask     RW mask : bitmask of allowed CPUs for the workers
4986 */
4987struct wq_device {
4988        struct workqueue_struct         *wq;
4989        struct device                   dev;
4990};
4991
4992static struct workqueue_struct *dev_to_wq(struct device *dev)
4993{
4994        struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
4995
4996        return wq_dev->wq;
4997}
4998
4999static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr,
5000                            char *buf)
5001{
5002        struct workqueue_struct *wq = dev_to_wq(dev);
5003
5004        return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
5005}
5006static DEVICE_ATTR_RO(per_cpu);
5007
5008static ssize_t max_active_show(struct device *dev,
5009                               struct device_attribute *attr, char *buf)
5010{
5011        struct workqueue_struct *wq = dev_to_wq(dev);
5012
5013        return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
5014}
5015
5016static ssize_t max_active_store(struct device *dev,
5017                                struct device_attribute *attr, const char *buf,
5018                                size_t count)
5019{
5020        struct workqueue_struct *wq = dev_to_wq(dev);
5021        int val;
5022
5023        if (sscanf(buf, "%d", &val) != 1 || val <= 0)
5024                return -EINVAL;
5025
5026        workqueue_set_max_active(wq, val);
5027        return count;
5028}
5029static DEVICE_ATTR_RW(max_active);
5030
5031static struct attribute *wq_sysfs_attrs[] = {
5032        &dev_attr_per_cpu.attr,
5033        &dev_attr_max_active.attr,
5034        NULL,
5035};
5036ATTRIBUTE_GROUPS(wq_sysfs);
5037
5038static ssize_t wq_pool_ids_show(struct device *dev,
5039                                struct device_attribute *attr, char *buf)
5040{
5041        struct workqueue_struct *wq = dev_to_wq(dev);
5042        const char *delim = "";
5043        int node, written = 0;
5044
5045        rcu_read_lock_sched();
5046        for_each_node(node) {
5047                written += scnprintf(buf + written, PAGE_SIZE - written,
5048                                     "%s%d:%d", delim, node,
5049                                     unbound_pwq_by_node(wq, node)->pool->id);
5050                delim = " ";
5051        }
5052        written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
5053        rcu_read_unlock_sched();
5054
5055        return written;
5056}
5057
5058static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
5059                            char *buf)
5060{
5061        struct workqueue_struct *wq = dev_to_wq(dev);
5062        int written;
5063
5064        mutex_lock(&wq->mutex);
5065        written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice);
5066        mutex_unlock(&wq->mutex);
5067
5068        return written;
5069}
5070
5071/* prepare workqueue_attrs for sysfs store operations */
5072static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
5073{
5074        struct workqueue_attrs *attrs;
5075
5076        lockdep_assert_held(&wq_pool_mutex);
5077
5078        attrs = alloc_workqueue_attrs(GFP_KERNEL);
5079        if (!attrs)
5080                return NULL;
5081
5082        copy_workqueue_attrs(attrs, wq->unbound_attrs);
5083        return attrs;
5084}
5085
5086static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
5087                             const char *buf, size_t count)
5088{
5089        struct workqueue_struct *wq = dev_to_wq(dev);
5090        struct workqueue_attrs *attrs;
5091        int ret = -ENOMEM;
5092
5093        apply_wqattrs_lock();
5094
5095        attrs = wq_sysfs_prep_attrs(wq);
5096        if (!attrs)
5097                goto out_unlock;
5098
5099        if (sscanf(buf, "%d", &attrs->nice) == 1 &&
5100            attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE)
5101                ret = apply_workqueue_attrs_locked(wq, attrs);
5102        else
5103                ret = -EINVAL;
5104
5105out_unlock:
5106        apply_wqattrs_unlock();
5107        free_workqueue_attrs(attrs);
5108        return ret ?: count;
5109}
5110
5111static ssize_t wq_cpumask_show(struct device *dev,
5112                               struct device_attribute *attr, char *buf)
5113{
5114        struct workqueue_struct *wq = dev_to_wq(dev);
5115        int written;
5116
5117        mutex_lock(&wq->mutex);
5118        written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
5119                            cpumask_pr_args(wq->unbound_attrs->cpumask));
5120        mutex_unlock(&wq->mutex);
5121        return written;
5122}
5123
5124static ssize_t wq_cpumask_store(struct device *dev,
5125                                struct device_attribute *attr,
5126                                const char *buf, size_t count)
5127{
5128        struct workqueue_struct *wq = dev_to_wq(dev);
5129        struct workqueue_attrs *attrs;
5130        int ret = -ENOMEM;
5131
5132        apply_wqattrs_lock();
5133
5134        attrs = wq_sysfs_prep_attrs(wq);
5135        if (!attrs)
5136                goto out_unlock;
5137
5138        ret = cpumask_parse(buf, attrs->cpumask);
5139        if (!ret)
5140                ret = apply_workqueue_attrs_locked(wq, attrs);
5141
5142out_unlock:
5143        apply_wqattrs_unlock();
5144        free_workqueue_attrs(attrs);
5145        return ret ?: count;
5146}
5147
5148static ssize_t wq_numa_show(struct device *dev, struct device_attribute *attr,
5149                            char *buf)
5150{
5151        struct workqueue_struct *wq = dev_to_wq(dev);
5152        int written;
5153
5154        mutex_lock(&wq->mutex);
5155        written = scnprintf(buf, PAGE_SIZE, "%d\n",
5156                            !wq->unbound_attrs->no_numa);
5157        mutex_unlock(&wq->mutex);
5158
5159        return written;
5160}
5161
5162static ssize_t wq_numa_store(struct device *dev, struct device_attribute *attr,
5163                             const char *buf, size_t count)
5164{
5165        struct workqueue_struct *wq = dev_to_wq(dev);
5166        struct workqueue_attrs *attrs;
5167        int v, ret = -ENOMEM;
5168
5169        apply_wqattrs_lock();
5170
5171        attrs = wq_sysfs_prep_attrs(wq);
5172        if (!attrs)
5173                goto out_unlock;
5174
5175        ret = -EINVAL;
5176        if (sscanf(buf, "%d", &v) == 1) {
5177                attrs->no_numa = !v;
5178                ret = apply_workqueue_attrs_locked(wq, attrs);
5179        }
5180
5181out_unlock:
5182        apply_wqattrs_unlock();
5183        free_workqueue_attrs(attrs);
5184        return ret ?: count;
5185}
5186
5187static struct device_attribute wq_sysfs_unbound_attrs[] = {
5188        __ATTR(pool_ids, 0444, wq_pool_ids_show, NULL),
5189        __ATTR(nice, 0644, wq_nice_show, wq_nice_store),
5190        __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
5191        __ATTR(numa, 0644, wq_numa_show, wq_numa_store),
5192        __ATTR_NULL,
5193};
5194
5195static struct bus_type wq_subsys = {
5196        .name                           = "workqueue",
5197        .dev_groups                     = wq_sysfs_groups,
5198};
5199
5200static ssize_t wq_unbound_cpumask_show(struct device *dev,
5201                struct device_attribute *attr, char *buf)
5202{
5203        int written;
5204
5205        mutex_lock(&wq_pool_mutex);
5206        written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
5207                            cpumask_pr_args(wq_unbound_cpumask));
5208        mutex_unlock(&wq_pool_mutex);
5209
5210        return written;
5211}
5212
5213static ssize_t wq_unbound_cpumask_store(struct device *dev,
5214                struct device_attribute *attr, const char *buf, size_t count)
5215{
5216        cpumask_var_t cpumask;
5217        int ret;
5218
5219        if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
5220                return -ENOMEM;
5221
5222        ret = cpumask_parse(buf, cpumask);
5223        if (!ret)
5224                ret = workqueue_set_unbound_cpumask(cpumask);
5225
5226        free_cpumask_var(cpumask);
5227        return ret ? ret : count;
5228}
5229
5230static struct device_attribute wq_sysfs_cpumask_attr =
5231        __ATTR(cpumask, 0644, wq_unbound_cpumask_show,
5232               wq_unbound_cpumask_store);
5233
5234static int __init wq_sysfs_init(void)
5235{
5236        int err;
5237
5238        err = subsys_virtual_register(&wq_subsys, NULL);
5239        if (err)
5240                return err;
5241
5242        return device_create_file(wq_subsys.dev_root, &wq_sysfs_cpumask_attr);
5243}
5244core_initcall(wq_sysfs_init);
5245
5246static void wq_device_release(struct device *dev)
5247{
5248        struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
5249
5250        kfree(wq_dev);
5251}
5252
5253/**
5254 * workqueue_sysfs_register - make a workqueue visible in sysfs
5255 * @wq: the workqueue to register
5256 *
5257 * Expose @wq in sysfs under /sys/bus/workqueue/devices.
5258 * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set
5259 * which is the preferred method.
5260 *
5261 * Workqueue user should use this function directly iff it wants to apply
5262 * workqueue_attrs before making the workqueue visible in sysfs; otherwise,
5263 * apply_workqueue_attrs() may race against userland updating the
5264 * attributes.
5265 *
5266 * Return: 0 on success, -errno on failure.
5267 */
5268int workqueue_sysfs_register(struct workqueue_struct *wq)
5269{
5270        struct wq_device *wq_dev;
5271        int ret;
5272
5273        /*
5274         * Adjusting max_active or creating new pwqs by applying
5275         * attributes breaks ordering guarantee.  Disallow exposing ordered
5276         * workqueues.
5277         */
5278        if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
5279                return -EINVAL;
5280
5281        wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
5282        if (!wq_dev)
5283                return -ENOMEM;
5284
5285        wq_dev->wq = wq;
5286        wq_dev->dev.bus = &wq_subsys;
5287        wq_dev->dev.release = wq_device_release;
5288        dev_set_name(&wq_dev->dev, "%s", wq->name);
5289
5290        /*
5291         * unbound_attrs are created separately.  Suppress uevent until
5292         * everything is ready.
5293         */
5294        dev_set_uevent_suppress(&wq_dev->dev, true);
5295
5296        ret = device_register(&wq_dev->dev);
5297        if (ret) {
5298                kfree(wq_dev);
5299                wq->wq_dev = NULL;
5300                return ret;
5301        }
5302
5303        if (wq->flags & WQ_UNBOUND) {
5304                struct device_attribute *attr;
5305
5306                for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) {
5307                        ret = device_create_file(&wq_dev->dev, attr);
5308                        if (ret) {
5309                                device_unregister(&wq_dev->dev);
5310                                wq->wq_dev = NULL;
5311                                return ret;
5312                        }
5313                }
5314        }
5315
5316        dev_set_uevent_suppress(&wq_dev->dev, false);
5317        kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
5318        return 0;
5319}
5320
5321/**
5322 * workqueue_sysfs_unregister - undo workqueue_sysfs_register()
5323 * @wq: the workqueue to unregister
5324 *
5325 * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister.
5326 */
5327static void workqueue_sysfs_unregister(struct workqueue_struct *wq)
5328{
5329        struct wq_device *wq_dev = wq->wq_dev;
5330
5331        if (!wq->wq_dev)
5332                return;
5333
5334        wq->wq_dev = NULL;
5335        device_unregister(&wq_dev->dev);
5336}
5337#else   /* CONFIG_SYSFS */
5338static void workqueue_sysfs_unregister(struct workqueue_struct *wq)     { }
5339#endif  /* CONFIG_SYSFS */
5340
5341/*
5342 * Workqueue watchdog.
5343 *
5344 * Stall may be caused by various bugs - missing WQ_MEM_RECLAIM, illegal
5345 * flush dependency, a concurrency managed work item which stays RUNNING
5346 * indefinitely.  Workqueue stalls can be very difficult to debug as the
5347 * usual warning mechanisms don't trigger and internal workqueue state is
5348 * largely opaque.
5349 *
5350 * Workqueue watchdog monitors all worker pools periodically and dumps
5351 * state if some pools failed to make forward progress for a while where
5352 * forward progress is defined as the first item on ->worklist changing.
5353 *
5354 * This mechanism is controlled through the kernel parameter
5355 * "workqueue.watchdog_thresh" which can be updated at runtime through the
5356 * corresponding sysfs parameter file.
5357 */
5358#ifdef CONFIG_WQ_WATCHDOG
5359
5360static void wq_watchdog_timer_fn(unsigned long data);
5361
5362static unsigned long wq_watchdog_thresh = 30;
5363static struct timer_list wq_watchdog_timer =
5364        TIMER_DEFERRED_INITIALIZER(wq_watchdog_timer_fn, 0, 0);
5365
5366static unsigned long wq_watchdog_touched = INITIAL_JIFFIES;
5367static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;
5368
5369static void wq_watchdog_reset_touched(void)
5370{
5371        int cpu;
5372
5373        wq_watchdog_touched = jiffies;
5374        for_each_possible_cpu(cpu)
5375                per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
5376}
5377
5378static void wq_watchdog_timer_fn(unsigned long data)
5379{
5380        unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
5381        bool lockup_detected = false;
5382        struct worker_pool *pool;
5383        int pi;
5384
5385        if (!thresh)
5386                return;
5387
5388        rcu_read_lock();
5389
5390        for_each_pool(pool, pi) {
5391                unsigned long pool_ts, touched, ts;
5392
5393                if (list_empty(&pool->worklist))
5394                        continue;
5395
5396                /* get the latest of pool and touched timestamps */
5397                pool_ts = READ_ONCE(pool->watchdog_ts);
5398                touched = READ_ONCE(wq_watchdog_touched);
5399
5400                if (time_after(pool_ts, touched))
5401                        ts = pool_ts;
5402                else
5403                        ts = touched;
5404
5405                if (pool->cpu >= 0) {
5406                        unsigned long cpu_touched =
5407                                READ_ONCE(per_cpu(wq_watchdog_touched_cpu,
5408                                                  pool->cpu));
5409                        if (time_after(cpu_touched, ts))
5410                                ts = cpu_touched;
5411                }
5412
5413                /* did we stall? */
5414                if (time_after(jiffies, ts + thresh)) {
5415                        lockup_detected = true;
5416                        pr_emerg("BUG: workqueue lockup - pool");
5417                        pr_cont_pool_info(pool);
5418                        pr_cont(" stuck for %us!\n",
5419                                jiffies_to_msecs(jiffies - pool_ts) / 1000);
5420                }
5421        }
5422
5423        rcu_read_unlock();
5424
5425        if (lockup_detected)
5426                show_workqueue_state();
5427
5428        wq_watchdog_reset_touched();
5429        mod_timer(&wq_watchdog_timer, jiffies + thresh);
5430}
5431
5432void wq_watchdog_touch(int cpu)
5433{
5434        if (cpu >= 0)
5435                per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
5436        else
5437                wq_watchdog_touched = jiffies;
5438}
5439
5440static void wq_watchdog_set_thresh(unsigned long thresh)
5441{
5442        wq_watchdog_thresh = 0;
5443        del_timer_sync(&wq_watchdog_timer);
5444
5445        if (thresh) {
5446                wq_watchdog_thresh = thresh;
5447                wq_watchdog_reset_touched();
5448                mod_timer(&wq_watchdog_timer, jiffies + thresh * HZ);
5449        }
5450}
5451
5452static int wq_watchdog_param_set_thresh(const char *val,
5453                                        const struct kernel_param *kp)
5454{
5455        unsigned long thresh;
5456        int ret;
5457
5458        ret = kstrtoul(val, 0, &thresh);
5459        if (ret)
5460                return ret;
5461
5462        if (system_wq)
5463                wq_watchdog_set_thresh(thresh);
5464        else
5465                wq_watchdog_thresh = thresh;
5466
5467        return 0;
5468}
5469
5470static const struct kernel_param_ops wq_watchdog_thresh_ops = {
5471        .set    = wq_watchdog_param_set_thresh,
5472        .get    = param_get_ulong,
5473};
5474
5475module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh,
5476                0644);
5477
5478static void wq_watchdog_init(void)
5479{
5480        wq_watchdog_set_thresh(wq_watchdog_thresh);
5481}
5482
5483#else   /* CONFIG_WQ_WATCHDOG */
5484
5485static inline void wq_watchdog_init(void) { }
5486
5487#endif  /* CONFIG_WQ_WATCHDOG */
5488
5489static void __init wq_numa_init(void)
5490{
5491        cpumask_var_t *tbl;
5492        int node, cpu;
5493
5494        if (num_possible_nodes() <= 1)
5495                return;
5496
5497        if (wq_disable_numa) {
5498                pr_info("workqueue: NUMA affinity support disabled\n");
5499                return;
5500        }
5501
5502        wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs(GFP_KERNEL);
5503        BUG_ON(!wq_update_unbound_numa_attrs_buf);
5504
5505        /*
5506         * We want masks of possible CPUs of each node which isn't readily
5507         * available.  Build one from cpu_to_node() which should have been
5508         * fully initialized by now.
5509         */
5510        tbl = kzalloc(nr_node_ids * sizeof(tbl[0]), GFP_KERNEL);
5511        BUG_ON(!tbl);
5512
5513        for_each_node(node)
5514                BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
5515                                node_online(node) ? node : NUMA_NO_NODE));
5516
5517        for_each_possible_cpu(cpu) {
5518                node = cpu_to_node(cpu);
5519                if (WARN_ON(node == NUMA_NO_NODE)) {
5520                        pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
5521                        /* happens iff arch is bonkers, let's just proceed */
5522                        return;
5523                }
5524                cpumask_set_cpu(cpu, tbl[node]);
5525        }
5526
5527        wq_numa_possible_cpumask = tbl;
5528        wq_numa_enabled = true;
5529}
5530
5531/**
5532 * workqueue_init_early - early init for workqueue subsystem
5533 *
5534 * This is the first half of two-staged workqueue subsystem initialization
5535 * and invoked as soon as the bare basics - memory allocation, cpumasks and
5536 * idr are up.  It sets up all the data structures and system workqueues
5537 * and allows early boot code to create workqueues and queue/cancel work
5538 * items.  Actual work item execution starts only after kthreads can be
5539 * created and scheduled right before early initcalls.
5540 */
5541int __init workqueue_init_early(void)
5542{
5543        int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
5544        int i, cpu;
5545
5546        WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
5547
5548        BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
5549        cpumask_copy(wq_unbound_cpumask, cpu_possible_mask);
5550
5551        pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
5552
5553        /* initialize CPU pools */
5554        for_each_possible_cpu(cpu) {
5555                struct worker_pool *pool;
5556
5557                i = 0;
5558                for_each_cpu_worker_pool(pool, cpu) {
5559                        BUG_ON(init_worker_pool(pool));
5560                        pool->cpu = cpu;
5561                        cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
5562                        pool->attrs->nice = std_nice[i++];
5563                        pool->node = cpu_to_node(cpu);
5564
5565                        /* alloc pool ID */
5566                        mutex_lock(&wq_pool_mutex);
5567                        BUG_ON(worker_pool_assign_id(pool));
5568                        mutex_unlock(&wq_pool_mutex);
5569                }
5570        }
5571
5572        /* create default unbound and ordered wq attrs */
5573        for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
5574                struct workqueue_attrs *attrs;
5575
5576                BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
5577                attrs->nice = std_nice[i];
5578                unbound_std_wq_attrs[i] = attrs;
5579
5580                /*
5581                 * An ordered wq should have only one pwq as ordering is
5582                 * guaranteed by max_active which is enforced by pwqs.
5583                 * Turn off NUMA so that dfl_pwq is used for all nodes.
5584                 */
5585                BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
5586                attrs->nice = std_nice[i];
5587                attrs->no_numa = true;
5588                ordered_wq_attrs[i] = attrs;
5589        }
5590
5591        system_wq = alloc_workqueue("events", 0, 0);
5592        system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
5593        system_long_wq = alloc_workqueue("events_long", 0, 0);
5594        system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
5595                                            WQ_UNBOUND_MAX_ACTIVE);
5596        system_freezable_wq = alloc_workqueue("events_freezable",
5597                                              WQ_FREEZABLE, 0);
5598        system_power_efficient_wq = alloc_workqueue("events_power_efficient",
5599                                              WQ_POWER_EFFICIENT, 0);
5600        system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
5601                                              WQ_FREEZABLE | WQ_POWER_EFFICIENT,
5602                                              0);
5603        BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
5604               !system_unbound_wq || !system_freezable_wq ||
5605               !system_power_efficient_wq ||
5606               !system_freezable_power_efficient_wq);
5607
5608        return 0;
5609}
5610
5611/**
5612 * workqueue_init - bring workqueue subsystem fully online
5613 *
5614 * This is the latter half of two-staged workqueue subsystem initialization
5615 * and invoked as soon as kthreads can be created and scheduled.
5616 * Workqueues have been created and work items queued on them, but there
5617 * are no kworkers executing the work items yet.  Populate the worker pools
5618 * with the initial workers and enable future kworker creations.
5619 */
5620int __init workqueue_init(void)
5621{
5622        struct workqueue_struct *wq;
5623        struct worker_pool *pool;
5624        int cpu, bkt;
5625
5626        /*
5627         * It'd be simpler to initialize NUMA in workqueue_init_early() but
5628         * CPU to node mapping may not be available that early on some
5629         * archs such as power and arm64.  As per-cpu pools created
5630         * previously could be missing node hint and unbound pools NUMA
5631         * affinity, fix them up.
5632         */
5633        wq_numa_init();
5634
5635        mutex_lock(&wq_pool_mutex);
5636
5637        for_each_possible_cpu(cpu) {
5638                for_each_cpu_worker_pool(pool, cpu) {
5639                        pool->node = cpu_to_node(cpu);
5640                }
5641        }
5642
5643        list_for_each_entry(wq, &workqueues, list)
5644                wq_update_unbound_numa(wq, smp_processor_id(), true);
5645
5646        mutex_unlock(&wq_pool_mutex);
5647
5648        /* create the initial workers */
5649        for_each_online_cpu(cpu) {
5650                for_each_cpu_worker_pool(pool, cpu) {
5651                        pool->flags &= ~POOL_DISASSOCIATED;
5652                        BUG_ON(!create_worker(pool));
5653                }
5654        }
5655
5656        hash_for_each(unbound_pool_hash, bkt, pool, hash_node)
5657                BUG_ON(!create_worker(pool));
5658
5659        wq_online = true;
5660        wq_watchdog_init();
5661
5662        return 0;
5663}
5664