linux/kernel/workqueue.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * kernel/workqueue.c - generic async execution with shared worker pool
 *
 * Copyright (C) 2002           Ingo Molnar
 *
 *   Derived from the taskqueue/keventd code by:
 *     David Woodhouse <dwmw2@infradead.org>
 *     Andrew Morton
 *     Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *     Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 *
 * Copyright (C) 2010           SUSE Linux Products GmbH
 * Copyright (C) 2010           Tejun Heo <tj@kernel.org>
 *
 * This is the generic async execution mechanism.  Work items are
 * executed in process context.  The worker pool is shared and
 * automatically managed.  There are two worker pools for each CPU (one for
 * normal work items and the other for high priority ones) and some extra
 * pools for workqueues which are not bound to any specific CPU - the
 * number of these backing pools is dynamic.
 *
 * Please read Documentation/core-api/workqueue.rst for details.
 */
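
/*
 * A minimal usage sketch (illustrative only; my_workfn and my_work are
 * made-up names, not part of this file):
 *
 *	static void my_workfn(struct work_struct *work)
 *	{
 *		pr_info("running in process context\n");
 *	}
 *	static DECLARE_WORK(my_work, my_workfn);
 *
 * schedule_work(&my_work) then queues the item on system_wq from almost
 * any context, and cancel_work_sync(&my_work) waits for it to finish
 * before the item may be freed.
 */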

#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#include <linux/idr.h>
#include <linux/jhash.h>
#include <linux/hashtable.h>
#include <linux/rculist.h>
#include <linux/nodemask.h>
#include <linux/moduleparam.h>
#include <linux/uaccess.h>
#include <linux/sched/isolation.h>
#include <linux/nmi.h>

#include "workqueue_internal.h"

enum {
        /*
         * worker_pool flags
         *
         * A bound pool is either associated or disassociated with its CPU.
         * While associated (!DISASSOCIATED), all workers are bound to the
         * CPU and none has %WORKER_UNBOUND set and concurrency management
         * is in effect.
         *
         * While DISASSOCIATED, the cpu may be offline and all workers have
         * %WORKER_UNBOUND set and concurrency management disabled, and may
         * be executing on any CPU.  The pool behaves as an unbound one.
         *
         * Note that DISASSOCIATED should be flipped only while holding
         * wq_pool_attach_mutex to avoid changing binding state while
         * worker_attach_to_pool() is in progress.
         */
        POOL_MANAGER_ACTIVE     = 1 << 0,       /* being managed */
        POOL_DISASSOCIATED      = 1 << 2,       /* cpu can't serve workers */

        /* worker flags */
        WORKER_DIE              = 1 << 1,       /* die die die */
        WORKER_IDLE             = 1 << 2,       /* is idle */
        WORKER_PREP             = 1 << 3,       /* preparing to run works */
        WORKER_CPU_INTENSIVE    = 1 << 6,       /* cpu intensive */
        WORKER_UNBOUND          = 1 << 7,       /* worker is unbound */
        WORKER_REBOUND          = 1 << 8,       /* worker was rebound */

        WORKER_NOT_RUNNING      = WORKER_PREP | WORKER_CPU_INTENSIVE |
                                  WORKER_UNBOUND | WORKER_REBOUND,

        NR_STD_WORKER_POOLS     = 2,            /* # standard pools per cpu */

        UNBOUND_POOL_HASH_ORDER = 6,            /* hashed by pool->attrs */
        BUSY_WORKER_HASH_ORDER  = 6,            /* 64 pointers */

        MAX_IDLE_WORKERS_RATIO  = 4,            /* 1/4 of busy can be idle */
        IDLE_WORKER_TIMEOUT     = 300 * HZ,     /* keep idle ones for 5 mins */

        MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
                                                /* call for help after 10ms
                                                   (min two ticks) */
        MAYDAY_INTERVAL         = HZ / 10,      /* and then every 100ms */
        CREATE_COOLDOWN         = HZ,           /* time to breathe after fail */

        /*
         * Rescue workers are used only in emergencies and shared by
         * all cpus.  Give MIN_NICE.
         */
        RESCUER_NICE_LEVEL      = MIN_NICE,
        HIGHPRI_NICE_LEVEL      = MIN_NICE,

        WQ_NAME_LEN             = 24,
};

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only for
 *    everyone else.
 *
 * P: Preemption protected.  Disabling preemption is enough and should
 *    only be modified and accessed from the local cpu.
 *
 * L: pool->lock protected.  Access with pool->lock held.
 *
 * X: During normal operation, modification requires pool->lock and should
 *    be done only from local cpu.  Either disabling preemption on local
 *    cpu or grabbing pool->lock is enough for read access.  If
 *    POOL_DISASSOCIATED is set, it's identical to L.
 *
 * A: wq_pool_attach_mutex protected.
 *
 * PL: wq_pool_mutex protected.
 *
 * PR: wq_pool_mutex protected for writes.  RCU protected for reads.
 *
 * PW: wq_pool_mutex and wq->mutex protected for writes.  Either for reads.
 *
 * PWR: wq_pool_mutex and wq->mutex protected for writes.  Either or
 *      RCU for reads.
 *
 * WQ: wq->mutex protected.
 *
 * WR: wq->mutex protected for writes.  RCU protected for reads.
 *
 * MD: wq_mayday_lock protected.
 */
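
/*
 * For example, honoring the WR rule above, a reader can walk wq->pwqs
 * under nothing but an RCU read-side critical section (a sketch;
 * inspect() is a stand-in):
 *
 *	struct pool_workqueue *pwq;
 *
 *	rcu_read_lock();
 *	for_each_pwq(pwq, wq)
 *		inspect(pwq);
 *	rcu_read_unlock();
 *
 * while modifying the list requires wq->mutex.
 */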

/* struct worker is defined in workqueue_internal.h */

struct worker_pool {
        spinlock_t              lock;           /* the pool lock */
        int                     cpu;            /* I: the associated cpu */
        int                     node;           /* I: the associated node ID */
        int                     id;             /* I: pool ID */
        unsigned int            flags;          /* X: flags */

        unsigned long           watchdog_ts;    /* L: watchdog timestamp */

        struct list_head        worklist;       /* L: list of pending works */

        int                     nr_workers;     /* L: total number of workers */
        int                     nr_idle;        /* L: currently idle workers */

        struct list_head        idle_list;      /* X: list of idle workers */
        struct timer_list       idle_timer;     /* L: worker idle timeout */
        struct timer_list       mayday_timer;   /* L: SOS timer for workers */

        /* a worker is either on busy_hash or idle_list, or is the manager */
        DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
                                                /* L: hash of busy workers */

        struct worker           *manager;       /* L: purely informational */
        struct list_head        workers;        /* A: attached workers */
        struct completion       *detach_completion; /* all workers detached */

        struct ida              worker_ida;     /* worker IDs for task name */

        struct workqueue_attrs  *attrs;         /* I: worker attributes */
        struct hlist_node       hash_node;      /* PL: unbound_pool_hash node */
        int                     refcnt;         /* PL: refcnt for unbound pools */

        /*
         * The current concurrency level.  As it's likely to be accessed
         * from other CPUs during try_to_wake_up(), put it in a separate
         * cacheline.
         */
        atomic_t                nr_running ____cacheline_aligned_in_smp;

        /*
         * Destruction of pool is RCU protected to allow dereferences
         * from get_work_pool().
         */
        struct rcu_head         rcu;
} ____cacheline_aligned_in_smp;

/*
 * The per-pool workqueue.  While queued, the lower WORK_STRUCT_FLAG_BITS
 * of work_struct->data are used for flags and the remaining high bits
 * point to the pwq; thus, pwqs need to be aligned to
 * 1 << WORK_STRUCT_FLAG_BITS.
 */
struct pool_workqueue {
        struct worker_pool      *pool;          /* I: the associated pool */
        struct workqueue_struct *wq;            /* I: the owning workqueue */
        int                     work_color;     /* L: current color */
        int                     flush_color;    /* L: flushing color */
        int                     refcnt;         /* L: reference count */
        int                     nr_in_flight[WORK_NR_COLORS];
                                                /* L: nr of in_flight works */
        int                     nr_active;      /* L: nr of active works */
        int                     max_active;     /* L: max active works */
        struct list_head        delayed_works;  /* L: delayed works */
        struct list_head        pwqs_node;      /* WR: node on wq->pwqs */
        struct list_head        mayday_node;    /* MD: node on wq->maydays */

        /*
         * Release of unbound pwq is punted to system_wq.  See put_pwq()
         * and pwq_unbound_release_workfn() for details.  pool_workqueue
         * itself is also RCU protected so that the first pwq can be
         * determined without grabbing wq->mutex.
         */
        struct work_struct      unbound_release_work;
        struct rcu_head         rcu;
} __aligned(1 << WORK_STRUCT_FLAG_BITS);

/*
 * Structure used to wait for workqueue flush.
 */
struct wq_flusher {
        struct list_head        list;           /* WQ: list of flushers */
        int                     flush_color;    /* WQ: flush color waiting for */
        struct completion       done;           /* flush completion */
};

struct wq_device;

/*
 * The externally visible workqueue.  It relays the issued work items to
 * the appropriate worker_pool through its pool_workqueues.
 */
struct workqueue_struct {
        struct list_head        pwqs;           /* WR: all pwqs of this wq */
        struct list_head        list;           /* PR: list of all workqueues */

        struct mutex            mutex;          /* protects this wq */
        int                     work_color;     /* WQ: current work color */
        int                     flush_color;    /* WQ: current flush color */
        atomic_t                nr_pwqs_to_flush; /* flush in progress */
        struct wq_flusher       *first_flusher; /* WQ: first flusher */
        struct list_head        flusher_queue;  /* WQ: flush waiters */
        struct list_head        flusher_overflow; /* WQ: flush overflow list */

        struct list_head        maydays;        /* MD: pwqs requesting rescue */
        struct worker           *rescuer;       /* I: rescue worker */

        int                     nr_drainers;    /* WQ: drain in progress */
        int                     saved_max_active; /* WQ: saved pwq max_active */

        struct workqueue_attrs  *unbound_attrs; /* PW: only for unbound wqs */
        struct pool_workqueue   *dfl_pwq;       /* PW: only for unbound wqs */

#ifdef CONFIG_SYSFS
        struct wq_device        *wq_dev;        /* I: for sysfs interface */
#endif
#ifdef CONFIG_LOCKDEP
        char                    *lock_name;
        struct lock_class_key   key;
        struct lockdep_map      lockdep_map;
#endif
        char                    name[WQ_NAME_LEN]; /* I: workqueue name */

        /*
         * Destruction of workqueue_struct is RCU protected to allow walking
         * the workqueues list without grabbing wq_pool_mutex.
         * This is used to dump all workqueues from sysrq.
         */
        struct rcu_head         rcu;

        /* hot fields used during command issue, aligned to cacheline */
        unsigned int            flags ____cacheline_aligned; /* WQ: WQ_* flags */
        struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwqs */
        struct pool_workqueue __rcu *numa_pwq_tbl[]; /* PWR: unbound pwqs indexed by node */
};
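
/*
 * Outside users get one of these from alloc_workqueue().  E.g., a sketch
 * of creating and tearing down a freezable unbound workqueue with the
 * default max_active ("my_wq" is an illustrative name):
 *
 *	struct workqueue_struct *wq;
 *
 *	wq = alloc_workqueue("my_wq", WQ_UNBOUND | WQ_FREEZABLE, 0);
 *	if (!wq)
 *		return -ENOMEM;
 *	...
 *	destroy_workqueue(wq);
 */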

static struct kmem_cache *pwq_cache;

static cpumask_var_t *wq_numa_possible_cpumask;
                                        /* possible CPUs of each node */

static bool wq_disable_numa;
module_param_named(disable_numa, wq_disable_numa, bool, 0444);

/* see the comment above the definition of WQ_POWER_EFFICIENT */
static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
module_param_named(power_efficient, wq_power_efficient, bool, 0444);

static bool wq_online;                  /* can kworkers be created yet? */

static bool wq_numa_enabled;            /* unbound NUMA affinity enabled */

/* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;

static DEFINE_MUTEX(wq_pool_mutex);     /* protects pools and workqueues list */
static DEFINE_MUTEX(wq_pool_attach_mutex); /* protects worker attach/detach */
static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */
static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */

static LIST_HEAD(workqueues);           /* PR: list of all workqueues */
static bool workqueue_freezing;         /* PL: have wqs started freezing? */

/* PL: allowable cpus for unbound wqs and work items */
static cpumask_var_t wq_unbound_cpumask;

/* CPU where unbound work was last round robin scheduled from this CPU */
static DEFINE_PER_CPU(int, wq_rr_cpu_last);

/*
 * Local execution of unbound work items is no longer guaranteed.  The
 * following always forces round-robin CPU selection on unbound work items
 * to uncover usages which depend on it.
 */
#ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
static bool wq_debug_force_rr_cpu = true;
#else
static bool wq_debug_force_rr_cpu = false;
#endif
module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);

/* the per-cpu worker pools */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS], cpu_worker_pools);

static DEFINE_IDR(worker_pool_idr);     /* PR: idr of all pools */

/* PL: hash of all unbound pools keyed by pool->attrs */
static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);

/* I: attributes used when instantiating standard unbound pools on demand */
static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];

/* I: attributes used when instantiating ordered pools on demand */
static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];

struct workqueue_struct *system_wq __read_mostly;
EXPORT_SYMBOL(system_wq);
struct workqueue_struct *system_highpri_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_highpri_wq);
struct workqueue_struct *system_long_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_long_wq);
struct workqueue_struct *system_unbound_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_unbound_wq);
struct workqueue_struct *system_freezable_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_freezable_wq);
struct workqueue_struct *system_power_efficient_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_power_efficient_wq);
struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);

static int worker_thread(void *__worker);
static void workqueue_sysfs_unregister(struct workqueue_struct *wq);

#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>

#define assert_rcu_or_pool_mutex()                                      \
        RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&                       \
                         !lockdep_is_held(&wq_pool_mutex),              \
                         "RCU or wq_pool_mutex should be held")

#define assert_rcu_or_wq_mutex(wq)                                      \
        RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&                       \
                         !lockdep_is_held(&wq->mutex),                  \
                         "RCU or wq->mutex should be held")

#define assert_rcu_or_wq_mutex_or_pool_mutex(wq)                        \
        RCU_LOCKDEP_WARN(!rcu_read_lock_held() &&                       \
                         !lockdep_is_held(&wq->mutex) &&                \
                         !lockdep_is_held(&wq_pool_mutex),              \
                         "RCU, wq->mutex or wq_pool_mutex should be held")

#define for_each_cpu_worker_pool(pool, cpu)                             \
        for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];               \
             (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
             (pool)++)

/**
 * for_each_pool - iterate through all worker_pools in the system
 * @pool: iteration cursor
 * @pi: integer used for iteration
 *
 * This must be called either with wq_pool_mutex held or RCU read
 * locked.  If the pool needs to be used beyond the locking in effect, the
 * caller is responsible for guaranteeing that the pool stays online.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pool(pool, pi)                                         \
        idr_for_each_entry(&worker_pool_idr, pool, pi)                  \
                if (({ assert_rcu_or_pool_mutex(); false; })) { }       \
                else

/**
 * for_each_pool_worker - iterate through all workers of a worker_pool
 * @worker: iteration cursor
 * @pool: worker_pool to iterate workers of
 *
 * This must be called with wq_pool_attach_mutex held.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pool_worker(worker, pool)                              \
        list_for_each_entry((worker), &(pool)->workers, node)           \
                if (({ lockdep_assert_held(&wq_pool_attach_mutex); false; })) { } \
                else

/**
 * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
 * @pwq: iteration cursor
 * @wq: the target workqueue
 *
 * This must be called either with wq->mutex held or RCU read locked.
 * If the pwq needs to be used beyond the locking in effect, the caller is
 * responsible for guaranteeing that the pwq stays online.
 *
 * The if/else clause exists only for the lockdep assertion and can be
 * ignored.
 */
#define for_each_pwq(pwq, wq)                                           \
        list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node)          \
                if (({ assert_rcu_or_wq_mutex(wq); false; })) { }       \
                else
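
/*
 * Typical iteration under the documented locking (a sketch; note_pool()
 * is a stand-in):
 *
 *	struct worker_pool *pool;
 *	int pi;
 *
 *	mutex_lock(&wq_pool_mutex);
 *	for_each_pool(pool, pi)
 *		note_pool(pool);
 *	mutex_unlock(&wq_pool_mutex);
 *
 * for_each_pwq() is used the same way with either rcu_read_lock() or
 * wq->mutex held, and for_each_pool_worker() with wq_pool_attach_mutex.
 */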

#ifdef CONFIG_DEBUG_OBJECTS_WORK

static struct debug_obj_descr work_debug_descr;

static void *work_debug_hint(void *addr)
{
        return ((struct work_struct *) addr)->func;
}

static bool work_is_static_object(void *addr)
{
        struct work_struct *work = addr;

        return test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work));
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool work_fixup_init(void *addr, enum debug_obj_state state)
{
        struct work_struct *work = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                cancel_work_sync(work);
                debug_object_init(work, &work_debug_descr);
                return true;
        default:
                return false;
        }
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool work_fixup_free(void *addr, enum debug_obj_state state)
{
        struct work_struct *work = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                cancel_work_sync(work);
                debug_object_free(work, &work_debug_descr);
                return true;
        default:
                return false;
        }
}

static struct debug_obj_descr work_debug_descr = {
        .name           = "work_struct",
        .debug_hint     = work_debug_hint,
        .is_static_object = work_is_static_object,
        .fixup_init     = work_fixup_init,
        .fixup_free     = work_fixup_free,
};

static inline void debug_work_activate(struct work_struct *work)
{
        debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
        debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
        if (onstack)
                debug_object_init_on_stack(work, &work_debug_descr);
        else
                debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
        debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

void destroy_delayed_work_on_stack(struct delayed_work *work)
{
        destroy_timer_on_stack(&work->timer);
        debug_object_free(&work->work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_delayed_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif
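
/*
 * With CONFIG_DEBUG_OBJECTS_WORK enabled, on-stack work items must use
 * the _ONSTACK initializers and be torn down explicitly, e.g. (a sketch,
 * reusing the made-up my_workfn from above):
 *
 *	struct work_struct work;
 *
 *	INIT_WORK_ONSTACK(&work, my_workfn);
 *	schedule_work(&work);
 *	flush_work(&work);
 *	destroy_work_on_stack(&work);
 */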

/**
 * worker_pool_assign_id - allocate ID and assign it to @pool
 * @pool: the pool pointer of interest
 *
 * Returns 0 if ID in [0, WORK_OFFQ_POOL_NONE) is allocated and assigned
 * successfully, -errno on failure.
 */
static int worker_pool_assign_id(struct worker_pool *pool)
{
        int ret;

        lockdep_assert_held(&wq_pool_mutex);

        ret = idr_alloc(&worker_pool_idr, pool, 0, WORK_OFFQ_POOL_NONE,
                        GFP_KERNEL);
        if (ret >= 0) {
                pool->id = ret;
                return 0;
        }
        return ret;
}

/**
 * unbound_pwq_by_node - return the unbound pool_workqueue for the given node
 * @wq: the target workqueue
 * @node: the node ID
 *
 * This must be called with any of wq_pool_mutex, wq->mutex or RCU
 * read locked.
 * If the pwq needs to be used beyond the locking in effect, the caller is
 * responsible for guaranteeing that the pwq stays online.
 *
 * Return: The unbound pool_workqueue for @node.
 */
static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq,
                                                  int node)
{
        assert_rcu_or_wq_mutex_or_pool_mutex(wq);

        /*
         * XXX: @node can be NUMA_NO_NODE if CPU goes offline while a
         * delayed item is pending.  The plan is to keep CPU -> NODE
         * mapping valid and stable across CPU on/offlines.  Once that
         * happens, this workaround can be removed.
         */
        if (unlikely(node == NUMA_NO_NODE))
                return wq->dfl_pwq;

        return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
}

static unsigned int work_color_to_flags(int color)
{
        return color << WORK_STRUCT_COLOR_SHIFT;
}

static int get_work_color(struct work_struct *work)
{
        return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
                ((1 << WORK_STRUCT_COLOR_BITS) - 1);
}

static int work_next_color(int color)
{
        return (color + 1) % WORK_NR_COLORS;
}
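
/*
 * A worked example, assuming WORK_STRUCT_COLOR_BITS is 4: WORK_NR_COLORS
 * is then 15, so work colors cycle 0, 1, ..., 14, 0, ... and the
 * remaining value 15 serves as WORK_NO_COLOR for work items which don't
 * participate in flushing or nr_active accounting.
 */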

/*
 * While queued, %WORK_STRUCT_PWQ is set and the non-flag bits of a work's
 * data contain the pointer to the queued pwq.  Once execution starts, the
 * flag is cleared and the high bits contain OFFQ flags and pool ID.
 *
 * set_work_pwq(), set_work_pool_and_clear_pending(), mark_work_canceling()
 * and clear_work_data() can be used to set the pwq, pool or clear
 * work->data.  These functions should only be called while the work is
 * owned, i.e. while the PENDING bit is set.
 *
 * get_work_pool() and get_work_pwq() can be used to obtain the pool or pwq
 * corresponding to a work.  Pool is available once the work has been
 * queued anywhere after initialization until it is sync canceled.  pwq is
 * available only while the work item is queued.
 *
 * %WORK_OFFQ_CANCELING is used to mark a work item which is being
 * canceled.  While being canceled, a work item may have its PENDING set
 * but stay off timer and worklist for arbitrarily long and nobody should
 * try to steal the PENDING bit.
 */
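
/*
 * Decoding work->data by hand would look like this sketch (the real
 * accessors are get_work_pwq() and get_work_pool() below):
 *
 *	unsigned long data = atomic_long_read(&work->data);
 *
 *	if (data & WORK_STRUCT_PWQ)
 *		pwq = (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
 *	else
 *		pool_id = data >> WORK_OFFQ_POOL_SHIFT;
 */
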
static inline void set_work_data(struct work_struct *work, unsigned long data,
                                 unsigned long flags)
{
        WARN_ON_ONCE(!work_pending(work));
        atomic_long_set(&work->data, data | flags | work_static(work));
}

static void set_work_pwq(struct work_struct *work, struct pool_workqueue *pwq,
                         unsigned long extra_flags)
{
        set_work_data(work, (unsigned long)pwq,
                      WORK_STRUCT_PENDING | WORK_STRUCT_PWQ | extra_flags);
}

static void set_work_pool_and_keep_pending(struct work_struct *work,
                                           int pool_id)
{
        set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT,
                      WORK_STRUCT_PENDING);
}

static void set_work_pool_and_clear_pending(struct work_struct *work,
                                            int pool_id)
{
        /*
         * The following wmb is paired with the implied mb in
         * test_and_set_bit(PENDING) and ensures all updates to @work made
         * here are visible to and precede any updates by the next PENDING
         * owner.
         */
        smp_wmb();
        set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
        /*
         * The following mb guarantees that previous clear of a PENDING bit
         * will not be reordered with any speculative LOADS or STORES from
         * work->current_func, which is executed afterwards.  This possible
         * reordering can lead to a missed execution on attempt to queue
         * the same @work.  E.g. consider this case:
         *
         *   CPU#0                         CPU#1
         *   ----------------------------  --------------------------------
         *
         * 1  STORE event_indicated
         * 2  queue_work_on() {
         * 3    test_and_set_bit(PENDING)
         * 4 }                             set_..._and_clear_pending() {
         * 5                                 set_work_data() # clear bit
         * 6                                 smp_mb()
         * 7                               work->current_func() {
         * 8                                  LOAD event_indicated
         *                                 }
         *
         * Without an explicit full barrier speculative LOAD on line 8 can
         * be executed before CPU#0 does STORE on line 1.  If that happens,
         * CPU#0 observes the PENDING bit is still set and new execution of
         * a @work is not queued, in the hope that CPU#1 will eventually
         * finish the queued @work.  Meanwhile CPU#1 does not see
         * event_indicated is set, because the speculative LOAD was
         * executed before the actual STORE.
         */
        smp_mb();
}

static void clear_work_data(struct work_struct *work)
{
        smp_wmb();      /* see set_work_pool_and_clear_pending() */
        set_work_data(work, WORK_STRUCT_NO_POOL, 0);
}

static struct pool_workqueue *get_work_pwq(struct work_struct *work)
{
        unsigned long data = atomic_long_read(&work->data);

        if (data & WORK_STRUCT_PWQ)
                return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
        else
                return NULL;
}

/**
 * get_work_pool - return the worker_pool a given work was associated with
 * @work: the work item of interest
 *
 * Pools are created and destroyed under wq_pool_mutex, and allow read
 * access under the RCU read lock.  As such, this function should be
 * called under wq_pool_mutex or inside an rcu_read_lock() region.
 *
 * All fields of the returned pool are accessible as long as the above
 * mentioned locking is in effect.  If the returned pool needs to be used
 * beyond the critical section, the caller is responsible for ensuring the
 * returned pool is and stays online.
 *
 * Return: The worker_pool @work was last associated with.  %NULL if none.
 */
static struct worker_pool *get_work_pool(struct work_struct *work)
{
        unsigned long data = atomic_long_read(&work->data);
        int pool_id;

        assert_rcu_or_pool_mutex();

        if (data & WORK_STRUCT_PWQ)
                return ((struct pool_workqueue *)
                        (data & WORK_STRUCT_WQ_DATA_MASK))->pool;

        pool_id = data >> WORK_OFFQ_POOL_SHIFT;
        if (pool_id == WORK_OFFQ_POOL_NONE)
                return NULL;

        return idr_find(&worker_pool_idr, pool_id);
}

/**
 * get_work_pool_id - return the worker pool ID a given work is associated with
 * @work: the work item of interest
 *
 * Return: The worker_pool ID @work was last associated with.
 * %WORK_OFFQ_POOL_NONE if none.
 */
static int get_work_pool_id(struct work_struct *work)
{
        unsigned long data = atomic_long_read(&work->data);

        if (data & WORK_STRUCT_PWQ)
                return ((struct pool_workqueue *)
                        (data & WORK_STRUCT_WQ_DATA_MASK))->pool->id;

        return data >> WORK_OFFQ_POOL_SHIFT;
}

static void mark_work_canceling(struct work_struct *work)
{
        unsigned long pool_id = get_work_pool_id(work);

        pool_id <<= WORK_OFFQ_POOL_SHIFT;
        set_work_data(work, pool_id | WORK_OFFQ_CANCELING, WORK_STRUCT_PENDING);
}

static bool work_is_canceling(struct work_struct *work)
{
        unsigned long data = atomic_long_read(&work->data);

        return !(data & WORK_STRUCT_PWQ) && (data & WORK_OFFQ_CANCELING);
}

/*
 * Policy functions.  These define the policies on how the global worker
 * pools are managed.  Unless noted otherwise, these functions assume that
 * they're being called with pool->lock held.
 */

static bool __need_more_worker(struct worker_pool *pool)
{
        return !atomic_read(&pool->nr_running);
}

/*
 * Need to wake up a worker?  Called from anything but currently
 * running workers.
 *
 * Note that, because unbound workers never contribute to nr_running, this
 * function will always return %true for unbound pools as long as the
 * worklist isn't empty.
 */
static bool need_more_worker(struct worker_pool *pool)
{
        return !list_empty(&pool->worklist) && __need_more_worker(pool);
}

/* Can I start working?  Called from busy but !running workers. */
static bool may_start_working(struct worker_pool *pool)
{
        return pool->nr_idle;
}

/* Do I need to keep working?  Called from currently running workers. */
static bool keep_working(struct worker_pool *pool)
{
        return !list_empty(&pool->worklist) &&
                atomic_read(&pool->nr_running) <= 1;
}

/* Do we need a new worker?  Called from manager. */
static bool need_to_create_worker(struct worker_pool *pool)
{
        return need_more_worker(pool) && !may_start_working(pool);
}

/* Do we have too many workers and should some go away? */
static bool too_many_workers(struct worker_pool *pool)
{
        bool managing = pool->flags & POOL_MANAGER_ACTIVE;
        int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
        int nr_busy = pool->nr_workers - nr_idle;

        return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
}
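
/*
 * E.g. with MAX_IDLE_WORKERS_RATIO of 4 and 16 busy workers, the above
 * becomes true once nr_idle reaches 2 + 16 / 4 == 6; up to 5 idle
 * workers are tolerated before the idle timer starts retiring them.
 */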

/*
 * Wake up functions.
 */

/* Return the first idle worker.  Safe with preemption disabled */
static struct worker *first_idle_worker(struct worker_pool *pool)
{
        if (unlikely(list_empty(&pool->idle_list)))
                return NULL;

        return list_first_entry(&pool->idle_list, struct worker, entry);
}

/**
 * wake_up_worker - wake up an idle worker
 * @pool: worker pool to wake worker from
 *
 * Wake up the first idle worker of @pool.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock).
 */
static void wake_up_worker(struct worker_pool *pool)
{
        struct worker *worker = first_idle_worker(pool);

        if (likely(worker))
                wake_up_process(worker->task);
}

/**
 * wq_worker_running - a worker is running again
 * @task: task waking up
 *
 * This function is called when a worker returns from schedule().
 */
void wq_worker_running(struct task_struct *task)
{
        struct worker *worker = kthread_data(task);

        if (!worker->sleeping)
                return;
        if (!(worker->flags & WORKER_NOT_RUNNING))
                atomic_inc(&worker->pool->nr_running);
        worker->sleeping = 0;
}

/**
 * wq_worker_sleeping - a worker is going to sleep
 * @task: task going to sleep
 *
 * This function is called from schedule() when a busy worker is
 * going to sleep.
 */
void wq_worker_sleeping(struct task_struct *task)
{
        struct worker *next, *worker = kthread_data(task);
        struct worker_pool *pool;

        /*
         * Rescuers, which may not have all the fields set up like normal
         * workers, also reach here; let's not access anything before
         * checking NOT_RUNNING.
         */
        if (worker->flags & WORKER_NOT_RUNNING)
                return;

        pool = worker->pool;

        if (WARN_ON_ONCE(worker->sleeping))
                return;

        worker->sleeping = 1;
        spin_lock_irq(&pool->lock);

        /*
         * The counterpart of the following sequence (dec_and_test,
         * implied mb, worklist-not-empty test) is in insert_work().
         * Please read the comment there.
         *
         * NOT_RUNNING is clear.  This means that we're bound to and
         * running on the local cpu w/ rq lock held and preemption
         * disabled, which in turn means that no one else could be
         * manipulating idle_list, so dereferencing idle_list without pool
         * lock is safe.
         */
        if (atomic_dec_and_test(&pool->nr_running) &&
            !list_empty(&pool->worklist)) {
                next = first_idle_worker(pool);
                if (next)
                        wake_up_process(next->task);
        }
        spin_unlock_irq(&pool->lock);
}

/**
 * wq_worker_last_func - retrieve worker's last work function
 * @task: Task to retrieve last work function of.
 *
 * Determine the last function a worker executed. This is called from
 * the scheduler to get a worker's last known identity.
 *
 * CONTEXT:
 * spin_lock_irq(rq->lock)
 *
 * This function is called during schedule() when a kworker is going
 * to sleep. It's used by psi to identify aggregation workers during
 * dequeuing, to allow periodic aggregation to shut off when that
 * worker is the last task in the system or cgroup to go to sleep.
 *
 * As this function doesn't involve any workqueue-related locking, it
 * only returns stable values when called from inside the scheduler's
 * queuing and dequeuing paths, when @task, which must be a kworker,
 * is guaranteed to not be processing any works.
 *
 * Return:
 * The last work function %current executed as a worker, NULL if it
 * hasn't executed any work yet.
 */
work_func_t wq_worker_last_func(struct task_struct *task)
{
        struct worker *worker = kthread_data(task);

        return worker->last_func;
}

/**
 * worker_set_flags - set worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to set
 *
 * Set @flags in @worker->flags and adjust nr_running accordingly.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock)
 */
static inline void worker_set_flags(struct worker *worker, unsigned int flags)
{
        struct worker_pool *pool = worker->pool;

        WARN_ON_ONCE(worker->task != current);

        /* If transitioning into NOT_RUNNING, adjust nr_running. */
        if ((flags & WORKER_NOT_RUNNING) &&
            !(worker->flags & WORKER_NOT_RUNNING)) {
                atomic_dec(&pool->nr_running);
        }

        worker->flags |= flags;
}

/**
 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
 * @worker: self
 * @flags: flags to clear
 *
 * Clear @flags in @worker->flags and adjust nr_running accordingly.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock)
 */
static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
{
        struct worker_pool *pool = worker->pool;
        unsigned int oflags = worker->flags;

        WARN_ON_ONCE(worker->task != current);

        worker->flags &= ~flags;

        /*
         * If transitioning out of NOT_RUNNING, increment nr_running.  Note
         * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is a
         * mask of multiple flags, not a single flag.
         */
        if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
                if (!(worker->flags & WORKER_NOT_RUNNING))
                        atomic_inc(&pool->nr_running);
}
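
/*
 * Because NOT_RUNNING is a mask, clearing one of its flags only bumps
 * nr_running when no other NOT_RUNNING flag remains.  E.g. for a worker
 * with both WORKER_PREP and WORKER_UNBOUND set,
 *
 *	worker_clr_flags(worker, WORKER_PREP);
 *
 * leaves nr_running untouched since WORKER_UNBOUND still keeps the
 * worker inside NOT_RUNNING.
 */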

/**
 * find_worker_executing_work - find worker which is executing a work
 * @pool: pool of interest
 * @work: work to find worker for
 *
 * Find a worker which is executing @work on @pool by searching
 * @pool->busy_hash which is keyed by the address of @work.  For a worker
 * to match, its current execution should match the address of @work and
 * its work function.  This is to avoid unwanted dependency between
 * unrelated work executions through a work item being recycled while still
 * being executed.
 *
 * This is a bit tricky.  A work item may be freed once its execution
 * starts and nothing prevents the freed area from being recycled for
 * another work item.  If the same work item address ends up being reused
 * before the original execution finishes, workqueue will identify the
 * recycled work item as currently executing and make it wait until the
 * current execution finishes, introducing an unwanted dependency.
 *
 * This function checks the work item address and work function to avoid
 * false positives.  Note that this isn't complete as one may construct a
 * work function which can introduce dependency onto itself through a
 * recycled work item.  Well, if somebody wants to shoot oneself in the
 * foot that badly, there's only so much we can do, and if such deadlock
 * actually occurs, it should be easy to locate the culprit work function.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock).
 *
 * Return:
 * Pointer to worker which is executing @work if found, %NULL
 * otherwise.
 */
static struct worker *find_worker_executing_work(struct worker_pool *pool,
                                                 struct work_struct *work)
{
        struct worker *worker;

        hash_for_each_possible(pool->busy_hash, worker, hentry,
                               (unsigned long)work)
                if (worker->current_work == work &&
                    worker->current_func == work->func)
                        return worker;

        return NULL;
}

/**
 * move_linked_works - move linked works to a list
 * @work: start of series of works to be scheduled
 * @head: target list to append @work to
 * @nextp: out parameter for nested worklist walking
 *
 * Schedule linked works starting from @work to @head.  Work series to
 * be scheduled starts at @work and includes any consecutive work with
 * WORK_STRUCT_LINKED set in its predecessor.
 *
 * If @nextp is not NULL, it's updated to point to the next work of
 * the last scheduled work.  This allows move_linked_works() to be
 * nested inside outer list_for_each_entry_safe().
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock).
 */
static void move_linked_works(struct work_struct *work, struct list_head *head,
                              struct work_struct **nextp)
{
        struct work_struct *n;

        /*
         * Linked worklist will always end before the end of the list,
         * use NULL for list head.
         */
        list_for_each_entry_safe_from(work, n, NULL, entry) {
                list_move_tail(&work->entry, head);
                if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
                        break;
        }

        /*
         * If we're already inside safe list traversal and have moved
         * multiple works to the scheduled queue, the next position
         * needs to be updated.
         */
        if (nextp)
                *nextp = n;
}

/**
 * get_pwq - get an extra reference on the specified pool_workqueue
 * @pwq: pool_workqueue to get
 *
 * Obtain an extra reference on @pwq.  The caller should guarantee that
 * @pwq has positive refcnt and be holding the matching pool->lock.
 */
static void get_pwq(struct pool_workqueue *pwq)
{
        lockdep_assert_held(&pwq->pool->lock);
        WARN_ON_ONCE(pwq->refcnt <= 0);
        pwq->refcnt++;
}

/**
 * put_pwq - put a pool_workqueue reference
 * @pwq: pool_workqueue to put
 *
 * Drop a reference of @pwq.  If its refcnt reaches zero, schedule its
 * destruction.  The caller should be holding the matching pool->lock.
 */
static void put_pwq(struct pool_workqueue *pwq)
{
        lockdep_assert_held(&pwq->pool->lock);
        if (likely(--pwq->refcnt))
                return;
        if (WARN_ON_ONCE(!(pwq->wq->flags & WQ_UNBOUND)))
                return;
        /*
         * @pwq can't be released under pool->lock, bounce to
         * pwq_unbound_release_workfn().  This never recurses on the same
         * pool->lock as this path is taken only for unbound workqueues and
         * the release work item is scheduled on a per-cpu workqueue.  To
         * avoid lockdep warning, unbound pool->locks are given lockdep
         * subclass of 1 in get_unbound_pool().
         */
        schedule_work(&pwq->unbound_release_work);
}

/**
 * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
 * @pwq: pool_workqueue to put (can be %NULL)
 *
 * put_pwq() with locking.  This function also allows %NULL @pwq.
 */
static void put_pwq_unlocked(struct pool_workqueue *pwq)
{
        if (pwq) {
                /*
                 * As both pwqs and pools are RCU protected, the
                 * following lock operations are safe.
                 */
                spin_lock_irq(&pwq->pool->lock);
                put_pwq(pwq);
                spin_unlock_irq(&pwq->pool->lock);
        }
}

static void pwq_activate_delayed_work(struct work_struct *work)
{
        struct pool_workqueue *pwq = get_work_pwq(work);

        trace_workqueue_activate_work(work);
        if (list_empty(&pwq->pool->worklist))
                pwq->pool->watchdog_ts = jiffies;
        move_linked_works(work, &pwq->pool->worklist, NULL);
        __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
        pwq->nr_active++;
}

static void pwq_activate_first_delayed(struct pool_workqueue *pwq)
{
        struct work_struct *work = list_first_entry(&pwq->delayed_works,
                                                    struct work_struct, entry);

        pwq_activate_delayed_work(work);
}

/**
 * pwq_dec_nr_in_flight - decrement pwq's nr_in_flight
 * @pwq: pwq of interest
 * @color: color of work which left the queue
 *
 * A work item has either completed or been removed from its pending
 * queue; decrement nr_in_flight of its pwq and handle workqueue flushing.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock).
 */
static void pwq_dec_nr_in_flight(struct pool_workqueue *pwq, int color)
{
        /* uncolored work items don't participate in flushing or nr_active */
        if (color == WORK_NO_COLOR)
                goto out_put;

        pwq->nr_in_flight[color]--;

        pwq->nr_active--;
        if (!list_empty(&pwq->delayed_works)) {
                /* one down, submit a delayed one */
                if (pwq->nr_active < pwq->max_active)
                        pwq_activate_first_delayed(pwq);
        }

        /* is flush in progress and are we at the flushing tip? */
        if (likely(pwq->flush_color != color))
                goto out_put;

        /* are there still in-flight works? */
        if (pwq->nr_in_flight[color])
                goto out_put;

        /* this pwq is done, clear flush_color */
        pwq->flush_color = -1;

        /*
         * If this was the last pwq, wake up the first flusher.  It
         * will handle the rest.
         */
        if (atomic_dec_and_test(&pwq->wq->nr_pwqs_to_flush))
                complete(&pwq->wq->first_flusher->done);
out_put:
        put_pwq(pwq);
}

/**
 * try_to_grab_pending - steal work item from worklist and disable irq
 * @work: work item to steal
 * @is_dwork: @work is a delayed_work
 * @flags: place to store irq state
 *
 * Try to grab PENDING bit of @work.  This function can handle @work in any
 * stable state - idle, on timer or on worklist.
 *
 * Return:
 *  1           if @work was pending and we successfully stole PENDING
 *  0           if @work was idle and we claimed PENDING
 *  -EAGAIN     if PENDING couldn't be grabbed at the moment, safe to busy-retry
 *  -ENOENT     if someone else is canceling @work, this state may persist
 *              for arbitrarily long
 *
 * Note:
 * On >= 0 return, the caller owns @work's PENDING bit.  To avoid getting
 * interrupted while holding PENDING and @work off queue, irq must be
 * disabled on entry.  This, combined with delayed_work->timer being
 * irqsafe, ensures that we return -EAGAIN for only a finite, short
 * period of time.
 *
 * On successful return, >= 0, irq is disabled and the caller is
 * responsible for releasing it using local_irq_restore(*@flags).
 *
 * This function is safe to call from any context including IRQ handler.
 */
static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
                               unsigned long *flags)
{
        struct worker_pool *pool;
        struct pool_workqueue *pwq;

        local_irq_save(*flags);

        /* try to steal the timer if it exists */
        if (is_dwork) {
                struct delayed_work *dwork = to_delayed_work(work);

                /*
                 * dwork->timer is irqsafe.  If del_timer() fails, it's
                 * guaranteed that the timer is not queued anywhere and not
                 * running on the local CPU.
                 */
                if (likely(del_timer(&dwork->timer)))
                        return 1;
        }

        /* try to claim PENDING the normal way */
        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
                return 0;

        rcu_read_lock();
        /*
         * The queueing is in progress, or it is already queued. Try to
         * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
         */
        pool = get_work_pool(work);
        if (!pool)
                goto fail;

        spin_lock(&pool->lock);
        /*
         * work->data is guaranteed to point to pwq only while the work
         * item is queued on pwq->wq, and both updating work->data to point
         * to pwq on queueing and to pool on dequeueing are done under
         * pwq->pool->lock.  This in turn guarantees that, if work->data
         * points to pwq which is associated with a locked pool, the work
         * item is currently queued on that pool.
         */
        pwq = get_work_pwq(work);
        if (pwq && pwq->pool == pool) {
                debug_work_deactivate(work);

                /*
                 * A delayed work item cannot be grabbed directly because
                 * it might have linked NO_COLOR work items which, if left
                 * on the delayed_list, will confuse pwq->nr_active
                 * management later on and cause stall.  Make sure the work
                 * item is activated before grabbing.
                 */
                if (*work_data_bits(work) & WORK_STRUCT_DELAYED)
                        pwq_activate_delayed_work(work);

                list_del_init(&work->entry);
                pwq_dec_nr_in_flight(pwq, get_work_color(work));

                /* work->data points to pwq iff queued, point to pool */
                set_work_pool_and_keep_pending(work, pool->id);

                spin_unlock(&pool->lock);
                rcu_read_unlock();
                return 1;
        }
        spin_unlock(&pool->lock);
fail:
        rcu_read_unlock();
        local_irq_restore(*flags);
        if (work_is_canceling(work))
                return -ENOENT;
        cpu_relax();
        return -EAGAIN;
}
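
/*
 * Callers typically spin on the -EAGAIN case, e.g. (a sketch of the
 * pattern used by the cancel paths):
 *
 *	do {
 *		ret = try_to_grab_pending(work, is_dwork, &flags);
 *	} while (unlikely(ret == -EAGAIN));
 *
 * try_to_grab_pending() re-enables IRQs itself on failure; on ret >= 0
 * the caller owns PENDING and must eventually call
 * local_irq_restore(flags).
 */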

/**
 * insert_work - insert a work into a pool
 * @pwq: pwq @work belongs to
 * @work: work to insert
 * @head: insertion point
 * @extra_flags: extra WORK_STRUCT_* flags to set
 *
 * Insert @work which belongs to @pwq after @head.  @extra_flags is or'd to
 * work_struct flags.
 *
 * CONTEXT:
 * spin_lock_irq(pool->lock).
 */
static void insert_work(struct pool_workqueue *pwq, struct work_struct *work,
                        struct list_head *head, unsigned int extra_flags)
{
        struct worker_pool *pool = pwq->pool;

        /* we own @work, set data and link */
        set_work_pwq(work, pwq, extra_flags);
        list_add_tail(&work->entry, head);
        get_pwq(pwq);

        /*
         * Ensure either wq_worker_sleeping() sees the above
         * list_add_tail() or we see zero nr_running to avoid workers lying
         * around lazily while there are works to be processed.
         */
        smp_mb();

        if (__need_more_worker(pool))
                wake_up_worker(pool);
}

/*
 * Test whether @work is being queued from another work executing on the
 * same workqueue.
 */
static bool is_chained_work(struct workqueue_struct *wq)
{
        struct worker *worker;

        worker = current_wq_worker();
        /*
         * Return %true iff I'm a worker executing a work item on @wq.  If
         * I'm @worker, it's safe to dereference it without locking.
         */
        return worker && worker->current_pwq->wq == wq;
}

/*
 * When queueing an unbound work item to a wq, prefer local CPU if allowed
 * by wq_unbound_cpumask.  Otherwise, round robin among the allowed ones to
 * avoid perturbing sensitive tasks.
 */
static int wq_select_unbound_cpu(int cpu)
{
        static bool printed_dbg_warning;
        int new_cpu;

        if (likely(!wq_debug_force_rr_cpu)) {
                if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
                        return cpu;
        } else if (!printed_dbg_warning) {
                pr_warn("workqueue: round-robin CPU selection forced, expect performance impact\n");
                printed_dbg_warning = true;
        }

        if (cpumask_empty(wq_unbound_cpumask))
                return cpu;

        new_cpu = __this_cpu_read(wq_rr_cpu_last);
        new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
        if (unlikely(new_cpu >= nr_cpu_ids)) {
                new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
                if (unlikely(new_cpu >= nr_cpu_ids))
                        return cpu;
        }
        __this_cpu_write(wq_rr_cpu_last, new_cpu);

        return new_cpu;
}
1393
1394static void __queue_work(int cpu, struct workqueue_struct *wq,
1395                         struct work_struct *work)
1396{
1397        struct pool_workqueue *pwq;
1398        struct worker_pool *last_pool;
1399        struct list_head *worklist;
1400        unsigned int work_flags;
1401        unsigned int req_cpu = cpu;
1402
1403        /*
1404         * While a work item is PENDING && off queue, a task trying to
1405         * steal the PENDING will busy-loop waiting for it to either get
1406         * queued or lose PENDING.  Grabbing PENDING and queueing should
1407         * happen with IRQ disabled.
1408         */
1409        lockdep_assert_irqs_disabled();
1410
1411        debug_work_activate(work);
1412
1413        /* if draining, only works from the same workqueue are allowed */
1414        if (unlikely(wq->flags & __WQ_DRAINING) &&
1415            WARN_ON_ONCE(!is_chained_work(wq)))
1416                return;
1417        rcu_read_lock();
1418retry:
1419        if (req_cpu == WORK_CPU_UNBOUND)
1420                cpu = wq_select_unbound_cpu(raw_smp_processor_id());
1421
1422        /* pwq which will be used unless @work is executing elsewhere */
1423        if (!(wq->flags & WQ_UNBOUND))
1424                pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
1425        else
1426                pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
1427
1428        /*
1429         * If @work was previously on a different pool, it might still be
1430         * running there, in which case the work needs to be queued on that
1431         * pool to guarantee non-reentrancy.
1432         */
1433        last_pool = get_work_pool(work);
1434        if (last_pool && last_pool != pwq->pool) {
1435                struct worker *worker;
1436
1437                spin_lock(&last_pool->lock);
1438
1439                worker = find_worker_executing_work(last_pool, work);
1440
1441                if (worker && worker->current_pwq->wq == wq) {
1442                        pwq = worker->current_pwq;
1443                } else {
1444                        /* meh... not running there, queue here */
1445                        spin_unlock(&last_pool->lock);
1446                        spin_lock(&pwq->pool->lock);
1447                }
1448        } else {
1449                spin_lock(&pwq->pool->lock);
1450        }
1451
1452        /*
1453         * pwq is determined and locked.  For unbound pools, we could have
1454         * raced with pwq release and it could already be dead.  If its
1455         * refcnt is zero, repeat pwq selection.  Note that pwqs never die
1456         * without another pwq replacing it in the numa_pwq_tbl or while
1457         * work items are executing on it, so the retrying is guaranteed to
1458         * make forward-progress.
1459         */
1460        if (unlikely(!pwq->refcnt)) {
1461                if (wq->flags & WQ_UNBOUND) {
1462                        spin_unlock(&pwq->pool->lock);
1463                        cpu_relax();
1464                        goto retry;
1465                }
1466                /* oops */
1467                WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
1468                          wq->name, cpu);
1469        }
1470
1471        /* pwq determined, queue */
1472        trace_workqueue_queue_work(req_cpu, pwq, work);
1473
1474        if (WARN_ON(!list_empty(&work->entry)))
1475                goto out;
1476
1477        pwq->nr_in_flight[pwq->work_color]++;
1478        work_flags = work_color_to_flags(pwq->work_color);
1479
1480        if (likely(pwq->nr_active < pwq->max_active)) {
1481                trace_workqueue_activate_work(work);
1482                pwq->nr_active++;
1483                worklist = &pwq->pool->worklist;
1484                if (list_empty(worklist))
1485                        pwq->pool->watchdog_ts = jiffies;
1486        } else {
1487                work_flags |= WORK_STRUCT_DELAYED;
1488                worklist = &pwq->delayed_works;
1489        }
1490
1491        insert_work(pwq, work, worklist, work_flags);
1492
1493out:
1494        spin_unlock(&pwq->pool->lock);
1495        rcu_read_unlock();
1496}
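
/*
 * Illustrative sketch (not part of workqueue.c): the non-reentrancy rule
 * above means a self-requeueing handler never overlaps with itself.  All
 * names below (requeue_count, requeue_fn, requeue_work) are made up.
 */
#include <linux/atomic.h>
#include <linux/workqueue.h>

static atomic_t requeue_count = ATOMIC_INIT(0);
static void requeue_fn(struct work_struct *work);
static DECLARE_WORK(requeue_work, requeue_fn);

static void requeue_fn(struct work_struct *work)
{
	/*
	 * Even if another CPU queues @requeue_work right now, __queue_work()
	 * routes it to the pool currently executing us, so the next
	 * invocation starts only after this one returns.
	 */
	if (atomic_inc_return(&requeue_count) < 10)
		schedule_work(work);
}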
1497
1498/**
1499 * queue_work_on - queue work on specific cpu
1500 * @cpu: CPU number to execute work on
1501 * @wq: workqueue to use
1502 * @work: work to queue
1503 *
1504 * We queue the work to a specific CPU; the caller must ensure that
1505 * the CPU can't go away.
1506 *
1507 * Return: %false if @work was already on a queue, %true otherwise.
1508 */
1509bool queue_work_on(int cpu, struct workqueue_struct *wq,
1510                   struct work_struct *work)
1511{
1512        bool ret = false;
1513        unsigned long flags;
1514
1515        local_irq_save(flags);
1516
1517        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1518                __queue_work(cpu, wq, work);
1519                ret = true;
1520        }
1521
1522        local_irq_restore(flags);
1523        return ret;
1524}
1525EXPORT_SYMBOL(queue_work_on);
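
/*
 * Illustrative sketch: pinning a work item to a specific CPU with
 * queue_work_on().  Per the comment above, the caller must keep the CPU
 * from going away, e.g. by holding cpus_read_lock() across the call.
 * my_work_fn/my_work/kick_cpu1 are made-up example names.
 */
#include <linux/cpu.h>
#include <linux/printk.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
	pr_info("my_work running on CPU%d\n", raw_smp_processor_id());
}
static DECLARE_WORK(my_work, my_work_fn);

static void kick_cpu1(void)
{
	cpus_read_lock();			/* hold off CPU hot-unplug */
	if (cpu_online(1))
		queue_work_on(1, system_wq, &my_work);
	cpus_read_unlock();
}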
1526
1527/**
1528 * workqueue_select_cpu_near - Select a CPU based on NUMA node
1529 * @node: NUMA node ID that we want to select a CPU from
1530 *
1531 * This function will attempt to find a "random" cpu available on a given
1532 * node.  If there are no CPUs available on the given node, it will return
1533 * WORK_CPU_UNBOUND, indicating that we should just schedule to any
1534 * available CPU if we need to schedule this work.
1535 */
1536static int workqueue_select_cpu_near(int node)
1537{
1538        int cpu;
1539
1540        /* No point in doing this if NUMA isn't enabled for workqueues */
1541        if (!wq_numa_enabled)
1542                return WORK_CPU_UNBOUND;
1543
1544        /* Delay binding to CPU if node is not valid or online */
1545        if (node < 0 || node >= MAX_NUMNODES || !node_online(node))
1546                return WORK_CPU_UNBOUND;
1547
1548        /* Use local node/cpu if we are already there */
1549        cpu = raw_smp_processor_id();
1550        if (node == cpu_to_node(cpu))
1551                return cpu;
1552
1553        /* Use "random", otherwise known as "first", online CPU of node */
1554        cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
1555
1556        /* If CPU is valid return that, otherwise just defer */
1557        return cpu < nr_cpu_ids ? cpu : WORK_CPU_UNBOUND;
1558}
1559
1560/**
1561 * queue_work_node - queue work on a "random" cpu for a given NUMA node
1562 * @node: NUMA node that we are targeting the work for
1563 * @wq: workqueue to use
1564 * @work: work to queue
1565 *
1566 * We queue the work to a "random" CPU within a given NUMA node.  The
1567 * basic idea here is to provide a way to loosely associate work with a
1568 * given NUMA node.
1569 *
1570 * This function will only make a best effort attempt at getting this onto
1571 * the right NUMA node. If no node is requested or the requested node is
1572 * offline then we just fall back to standard queue_work behavior.
1573 *
1574 * Currently the "random" CPU ends up being the first available CPU in the
1575 * intersection of cpu_online_mask and the cpumask of the node, unless we
1576 * are running on the node. In that case we just use the current CPU.
1577 *
1578 * Return: %false if @work was already on a queue, %true otherwise.
1579 */
1580bool queue_work_node(int node, struct workqueue_struct *wq,
1581                     struct work_struct *work)
1582{
1583        unsigned long flags;
1584        bool ret = false;
1585
1586        /*
1587         * This current implementation is specific to unbound workqueues.
1588         * Specifically we only return the first available CPU for a given
1589         * node instead of cycling through individual CPUs within the node.
1590         *
1591         * If this is used with a per-cpu workqueue then the logic in
1592         * workqueue_select_cpu_near would need to be updated to allow for
1593         * some round robin type logic.
1594         */
1595        WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND));
1596
1597        local_irq_save(flags);
1598
1599        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1600                int cpu = workqueue_select_cpu_near(node);
1601
1602                __queue_work(cpu, wq, work);
1603                ret = true;
1604        }
1605
1606        local_irq_restore(flags);
1607        return ret;
1608}
1609EXPORT_SYMBOL_GPL(queue_work_node);
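
/*
 * Illustrative sketch: queueing work near a device's NUMA node with
 * queue_work_node().  As the WARN_ON_ONCE() above requires, the target
 * workqueue is unbound.  my_unbound_wq, refill_work and refill_fn are
 * made-up example names.
 */
#include <linux/device.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_unbound_wq;
static struct work_struct refill_work;

static void refill_fn(struct work_struct *work) { /* ... */ }

static int my_setup(struct device *dev)
{
	my_unbound_wq = alloc_workqueue("my_unbound", WQ_UNBOUND, 0);
	if (!my_unbound_wq)
		return -ENOMEM;
	INIT_WORK(&refill_work, refill_fn);
	/* falls back to WORK_CPU_UNBOUND if the node has no online CPU */
	queue_work_node(dev_to_node(dev), my_unbound_wq, &refill_work);
	return 0;
}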
1610
1611void delayed_work_timer_fn(struct timer_list *t)
1612{
1613        struct delayed_work *dwork = from_timer(dwork, t, timer);
1614
1615        /* should have been called from irqsafe timer with irq already off */
1616        __queue_work(dwork->cpu, dwork->wq, &dwork->work);
1617}
1618EXPORT_SYMBOL(delayed_work_timer_fn);
1619
1620static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
1621                                struct delayed_work *dwork, unsigned long delay)
1622{
1623        struct timer_list *timer = &dwork->timer;
1624        struct work_struct *work = &dwork->work;
1625
1626        WARN_ON_ONCE(!wq);
1627        WARN_ON_ONCE(timer->function != delayed_work_timer_fn);
1628        WARN_ON_ONCE(timer_pending(timer));
1629        WARN_ON_ONCE(!list_empty(&work->entry));
1630
1631        /*
1632         * If @delay is 0, queue @dwork->work immediately.  This is for
1633         * both optimization and correctness.  The earliest @timer can
1634         * expire is on the closest next tick and delayed_work users depend
1635         * on that there's no such delay when @delay is 0.
1636         */
1637        if (!delay) {
1638                __queue_work(cpu, wq, &dwork->work);
1639                return;
1640        }
1641
1642        dwork->wq = wq;
1643        dwork->cpu = cpu;
1644        timer->expires = jiffies + delay;
1645
1646        if (unlikely(cpu != WORK_CPU_UNBOUND))
1647                add_timer_on(timer, cpu);
1648        else
1649                add_timer(timer);
1650}
1651
1652/**
1653 * queue_delayed_work_on - queue work on specific CPU after delay
1654 * @cpu: CPU number to execute work on
1655 * @wq: workqueue to use
1656 * @dwork: work to queue
1657 * @delay: number of jiffies to wait before queueing
1658 *
1659 * Return: %false if @work was already on a queue, %true otherwise.  If
1660 * @delay is zero and @dwork is idle, it will be scheduled for immediate
1661 * execution.
1662 */
1663bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
1664                           struct delayed_work *dwork, unsigned long delay)
1665{
1666        struct work_struct *work = &dwork->work;
1667        bool ret = false;
1668        unsigned long flags;
1669
1670        /* read the comment in __queue_work() */
1671        local_irq_save(flags);
1672
1673        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1674                __queue_delayed_work(cpu, wq, dwork, delay);
1675                ret = true;
1676        }
1677
1678        local_irq_restore(flags);
1679        return ret;
1680}
1681EXPORT_SYMBOL(queue_delayed_work_on);
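
/*
 * Illustrative sketch: a self-rearming poller built on
 * queue_delayed_work_on().  A zero delay queues for immediate execution,
 * exactly as __queue_delayed_work() above documents.  poll_fn/poll_dwork
 * are made-up example names.
 */
#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void poll_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(poll_dwork, poll_fn);

static void poll_fn(struct work_struct *work)
{
	/* rearm: run again in roughly one second on any CPU */
	queue_delayed_work_on(WORK_CPU_UNBOUND, system_wq, &poll_dwork,
			      msecs_to_jiffies(1000));
}

static void start_polling(void)
{
	/* delay == 0: scheduled for immediate execution */
	queue_delayed_work_on(WORK_CPU_UNBOUND, system_wq, &poll_dwork, 0);
}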
1682
1683/**
1684 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
1685 * @cpu: CPU number to execute work on
1686 * @wq: workqueue to use
1687 * @dwork: work to queue
1688 * @delay: number of jiffies to wait before queueing
1689 *
1690 * If @dwork is idle, equivalent to queue_delayed_work_on(); otherwise,
1691 * modify @dwork's timer so that it expires after @delay.  If @delay is
1692 * zero, @work is guaranteed to be scheduled immediately regardless of its
1693 * current state.
1694 *
1695 * Return: %false if @dwork was idle and queued, %true if @dwork was
1696 * pending and its timer was modified.
1697 *
1698 * This function is safe to call from any context including IRQ handler.
1699 * See try_to_grab_pending() for details.
1700 */
1701bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
1702                         struct delayed_work *dwork, unsigned long delay)
1703{
1704        unsigned long flags;
1705        int ret;
1706
1707        do {
1708                ret = try_to_grab_pending(&dwork->work, true, &flags);
1709        } while (unlikely(ret == -EAGAIN));
1710
1711        if (likely(ret >= 0)) {
1712                __queue_delayed_work(cpu, wq, dwork, delay);
1713                local_irq_restore(flags);
1714        }
1715
1716        /* -ENOENT from try_to_grab_pending() becomes %true */
1717        return ret;
1718}
1719EXPORT_SYMBOL_GPL(mod_delayed_work_on);
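
/*
 * Illustrative sketch: mod_delayed_work_on() as a debounce.  Every event
 * pushes the timer out, so debounce_fn runs once, 100ms after the last
 * event.  Safe from IRQ context, as the comment above notes.
 * debounce_fn/debounce_dwork/on_event are made-up example names.
 */
#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void debounce_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(debounce_dwork, debounce_fn);

static void debounce_fn(struct work_struct *work)
{
	/* handle the coalesced burst of events here */
}

static void on_event(void)		/* may be called from hardirq */
{
	mod_delayed_work_on(WORK_CPU_UNBOUND, system_wq, &debounce_dwork,
			    msecs_to_jiffies(100));
}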
1720
1721static void rcu_work_rcufn(struct rcu_head *rcu)
1722{
1723        struct rcu_work *rwork = container_of(rcu, struct rcu_work, rcu);
1724
1725        /* read the comment in __queue_work() */
1726        local_irq_disable();
1727        __queue_work(WORK_CPU_UNBOUND, rwork->wq, &rwork->work);
1728        local_irq_enable();
1729}
1730
1731/**
1732 * queue_rcu_work - queue work after a RCU grace period
1733 * @wq: workqueue to use
1734 * @rwork: work to queue
1735 *
1736 * Return: %false if @rwork was already pending, %true otherwise.  Note
1737 * that a full RCU grace period is guaranteed only after a %true return.
1738 * While @rwork is guaranteed to be executed after a %false return, the
1739 * execution may happen before a full RCU grace period has passed.
1740 */
1741bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork)
1742{
1743        struct work_struct *work = &rwork->work;
1744
1745        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1746                rwork->wq = wq;
1747                call_rcu(&rwork->rcu, rcu_work_rcufn);
1748                return true;
1749        }
1750
1751        return false;
1752}
1753EXPORT_SYMBOL(queue_rcu_work);
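
/*
 * Illustrative sketch: deferring a kfree() until after an RCU grace
 * period, in process context, via queue_rcu_work().  Embedding the
 * rcu_work in the object lets the handler recover it with container_of().
 * struct my_obj and the function names are made up.
 */
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_obj {
	struct rcu_work free_rwork;
	/* ... RCU-protected payload ... */
};

static void my_obj_free_fn(struct work_struct *work)
{
	struct rcu_work *rwork = to_rcu_work(work);

	kfree(container_of(rwork, struct my_obj, free_rwork));
}

static void my_obj_release(struct my_obj *obj)
{
	INIT_RCU_WORK(&obj->free_rwork, my_obj_free_fn);
	queue_rcu_work(system_wq, &obj->free_rwork);	/* runs after a GP */
}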
1754
1755/**
1756 * worker_enter_idle - enter idle state
1757 * @worker: worker which is entering idle state
1758 *
1759 * @worker is entering idle state.  Update stats and idle timer if
1760 * necessary.
1761 *
1762 * LOCKING:
1763 * spin_lock_irq(pool->lock).
1764 */
1765static void worker_enter_idle(struct worker *worker)
1766{
1767        struct worker_pool *pool = worker->pool;
1768
1769        if (WARN_ON_ONCE(worker->flags & WORKER_IDLE) ||
1770            WARN_ON_ONCE(!list_empty(&worker->entry) &&
1771                         (worker->hentry.next || worker->hentry.pprev)))
1772                return;
1773
1774        /* can't use worker_set_flags(), also called from create_worker() */
1775        worker->flags |= WORKER_IDLE;
1776        pool->nr_idle++;
1777        worker->last_active = jiffies;
1778
1779        /* idle_list is LIFO */
1780        list_add(&worker->entry, &pool->idle_list);
1781
1782        if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
1783                mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
1784
1785        /*
1786         * Sanity check nr_running.  Because unbind_workers() releases
1787         * pool->lock between setting %WORKER_UNBOUND and zapping
1788         * nr_running, the warning may trigger spuriously.  Check iff
1789         * unbind is not in progress.
1790         */
1791        WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
1792                     pool->nr_workers == pool->nr_idle &&
1793                     atomic_read(&pool->nr_running));
1794}
1795
1796/**
1797 * worker_leave_idle - leave idle state
1798 * @worker: worker which is leaving idle state
1799 *
1800 * @worker is leaving idle state.  Update stats.
1801 *
1802 * LOCKING:
1803 * spin_lock_irq(pool->lock).
1804 */
1805static void worker_leave_idle(struct worker *worker)
1806{
1807        struct worker_pool *pool = worker->pool;
1808
1809        if (WARN_ON_ONCE(!(worker->flags & WORKER_IDLE)))
1810                return;
1811        worker_clr_flags(worker, WORKER_IDLE);
1812        pool->nr_idle--;
1813        list_del_init(&worker->entry);
1814}
1815
1816static struct worker *alloc_worker(int node)
1817{
1818        struct worker *worker;
1819
1820        worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
1821        if (worker) {
1822                INIT_LIST_HEAD(&worker->entry);
1823                INIT_LIST_HEAD(&worker->scheduled);
1824                INIT_LIST_HEAD(&worker->node);
1825                /* on creation a worker is in !idle && prep state */
1826                worker->flags = WORKER_PREP;
1827        }
1828        return worker;
1829}
1830
1831/**
1832 * worker_attach_to_pool() - attach a worker to a pool
1833 * @worker: worker to be attached
1834 * @pool: the target pool
1835 *
1836 * Attach @worker to @pool.  Once attached, the %WORKER_UNBOUND flag and
1837 * cpu-binding of @worker are kept coordinated with the pool across
1838 * cpu-[un]hotplugs.
1839 */
1840static void worker_attach_to_pool(struct worker *worker,
1841                                   struct worker_pool *pool)
1842{
1843        mutex_lock(&wq_pool_attach_mutex);
1844
1845        /*
1846         * set_cpus_allowed_ptr() will fail if the cpumask doesn't have any
1847         * online CPUs.  It'll be re-applied when any of the CPUs come up.
1848         */
1849        set_cpus_allowed_ptr(worker->task, pool->attrs->cpumask);
1850
1851        /*
1852         * The wq_pool_attach_mutex ensures %POOL_DISASSOCIATED remains
1853         * stable across this function.  See the comments above the flag
1854         * definition for details.
1855         */
1856        if (pool->flags & POOL_DISASSOCIATED)
1857                worker->flags |= WORKER_UNBOUND;
1858
1859        list_add_tail(&worker->node, &pool->workers);
1860        worker->pool = pool;
1861
1862        mutex_unlock(&wq_pool_attach_mutex);
1863}
1864
1865/**
1866 * worker_detach_from_pool() - detach a worker from its pool
1867 * @worker: worker which is attached to its pool
1868 *
1869 * Undo the attaching which had been done in worker_attach_to_pool().  The
1870 * caller worker shouldn't access the pool after detaching unless it holds
1871 * another reference to the pool.
1872 */
1873static void worker_detach_from_pool(struct worker *worker)
1874{
1875        struct worker_pool *pool = worker->pool;
1876        struct completion *detach_completion = NULL;
1877
1878        mutex_lock(&wq_pool_attach_mutex);
1879
1880        list_del(&worker->node);
1881        worker->pool = NULL;
1882
1883        if (list_empty(&pool->workers))
1884                detach_completion = pool->detach_completion;
1885        mutex_unlock(&wq_pool_attach_mutex);
1886
1887        /* clear leftover flags without pool->lock after it is detached */
1888        worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
1889
1890        if (detach_completion)
1891                complete(detach_completion);
1892}
1893
1894/**
1895 * create_worker - create a new workqueue worker
1896 * @pool: pool the new worker will belong to
1897 *
1898 * Create and start a new worker which is attached to @pool.
1899 *
1900 * CONTEXT:
1901 * Might sleep.  Does GFP_KERNEL allocations.
1902 *
1903 * Return:
1904 * Pointer to the newly created worker.
1905 */
1906static struct worker *create_worker(struct worker_pool *pool)
1907{
1908        struct worker *worker = NULL;
1909        int id = -1;
1910        char id_buf[16];
1911
1912        /* ID is needed to determine kthread name */
1913        id = ida_simple_get(&pool->worker_ida, 0, 0, GFP_KERNEL);
1914        if (id < 0)
1915                goto fail;
1916
1917        worker = alloc_worker(pool->node);
1918        if (!worker)
1919                goto fail;
1920
1921        worker->id = id;
1922
1923        if (pool->cpu >= 0)
1924                snprintf(id_buf, sizeof(id_buf), "%d:%d%s", pool->cpu, id,
1925                         pool->attrs->nice < 0  ? "H" : "");
1926        else
1927                snprintf(id_buf, sizeof(id_buf), "u%d:%d", pool->id, id);
1928
1929        worker->task = kthread_create_on_node(worker_thread, worker, pool->node,
1930                                              "kworker/%s", id_buf);
1931        if (IS_ERR(worker->task))
1932                goto fail;
1933
1934        set_user_nice(worker->task, pool->attrs->nice);
1935        kthread_bind_mask(worker->task, pool->attrs->cpumask);
1936
1937        /* successful, attach the worker to the pool */
1938        worker_attach_to_pool(worker, pool);
1939
1940        /* start the newly created worker */
1941        spin_lock_irq(&pool->lock);
1942        worker->pool->nr_workers++;
1943        worker_enter_idle(worker);
1944        wake_up_process(worker->task);
1945        spin_unlock_irq(&pool->lock);
1946
1947        return worker;
1948
1949fail:
1950        if (id >= 0)
1951                ida_simple_remove(&pool->worker_ida, id);
1952        kfree(worker);
1953        return NULL;
1954}
1955
1956/**
1957 * destroy_worker - destroy a workqueue worker
1958 * @worker: worker to be destroyed
1959 *
1960 * Destroy @worker and adjust @pool stats accordingly.  The worker should
1961 * be idle.
1962 *
1963 * CONTEXT:
1964 * spin_lock_irq(pool->lock).
1965 */
1966static void destroy_worker(struct worker *worker)
1967{
1968        struct worker_pool *pool = worker->pool;
1969
1970        lockdep_assert_held(&pool->lock);
1971
1972        /* sanity check frenzy */
1973        if (WARN_ON(worker->current_work) ||
1974            WARN_ON(!list_empty(&worker->scheduled)) ||
1975            WARN_ON(!(worker->flags & WORKER_IDLE)))
1976                return;
1977
1978        pool->nr_workers--;
1979        pool->nr_idle--;
1980
1981        list_del_init(&worker->entry);
1982        worker->flags |= WORKER_DIE;
1983        wake_up_process(worker->task);
1984}
1985
1986static void idle_worker_timeout(struct timer_list *t)
1987{
1988        struct worker_pool *pool = from_timer(pool, t, idle_timer);
1989
1990        spin_lock_irq(&pool->lock);
1991
1992        while (too_many_workers(pool)) {
1993                struct worker *worker;
1994                unsigned long expires;
1995
1996                /* idle_list is kept in LIFO order, check the last one */
1997                worker = list_entry(pool->idle_list.prev, struct worker, entry);
1998                expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1999
2000                if (time_before(jiffies, expires)) {
2001                        mod_timer(&pool->idle_timer, expires);
2002                        break;
2003                }
2004
2005                destroy_worker(worker);
2006        }
2007
2008        spin_unlock_irq(&pool->lock);
2009}
2010
2011static void send_mayday(struct work_struct *work)
2012{
2013        struct pool_workqueue *pwq = get_work_pwq(work);
2014        struct workqueue_struct *wq = pwq->wq;
2015
2016        lockdep_assert_held(&wq_mayday_lock);
2017
2018        if (!wq->rescuer)
2019                return;
2020
2021        /* mayday mayday mayday */
2022        if (list_empty(&pwq->mayday_node)) {
2023                /*
2024                 * If @pwq is for an unbound wq, its base ref may be put at
2025                 * any time due to an attribute change.  Pin @pwq until the
2026                 * rescuer is done with it.
2027                 */
2028                get_pwq(pwq);
2029                list_add_tail(&pwq->mayday_node, &wq->maydays);
2030                wake_up_process(wq->rescuer->task);
2031        }
2032}
2033
2034static void pool_mayday_timeout(struct timer_list *t)
2035{
2036        struct worker_pool *pool = from_timer(pool, t, mayday_timer);
2037        struct work_struct *work;
2038
2039        spin_lock_irq(&pool->lock);
2040        spin_lock(&wq_mayday_lock);             /* for wq->maydays */
2041
2042        if (need_to_create_worker(pool)) {
2043                /*
2044                 * We've been trying to create a new worker but
2045                 * haven't been successful.  We might be hitting an
2046                 * allocation deadlock.  Send distress signals to
2047                 * rescuers.
2048                 */
2049                list_for_each_entry(work, &pool->worklist, entry)
2050                        send_mayday(work);
2051        }
2052
2053        spin_unlock(&wq_mayday_lock);
2054        spin_unlock_irq(&pool->lock);
2055
2056        mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
2057}
2058
2059/**
2060 * maybe_create_worker - create a new worker if necessary
2061 * @pool: pool to create a new worker for
2062 *
2063 * Create a new worker for @pool if necessary.  @pool is guaranteed to
2064 * have at least one idle worker on return from this function.  If
2065 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
2066 * sent to all rescuers with works scheduled on @pool to resolve
2067 * possible allocation deadlock.
2068 *
2069 * On return, need_to_create_worker() is guaranteed to be %false and
2070 * may_start_working() %true.
2071 *
2072 * LOCKING:
2073 * spin_lock_irq(pool->lock) which may be released and regrabbed
2074 * multiple times.  Does GFP_KERNEL allocations.  Called only from
2075 * manager.
2076 */
2077static void maybe_create_worker(struct worker_pool *pool)
2078__releases(&pool->lock)
2079__acquires(&pool->lock)
2080{
2081restart:
2082        spin_unlock_irq(&pool->lock);
2083
2084        /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
2085        mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
2086
2087        while (true) {
2088                if (create_worker(pool) || !need_to_create_worker(pool))
2089                        break;
2090
2091                schedule_timeout_interruptible(CREATE_COOLDOWN);
2092
2093                if (!need_to_create_worker(pool))
2094                        break;
2095        }
2096
2097        del_timer_sync(&pool->mayday_timer);
2098        spin_lock_irq(&pool->lock);
2099        /*
2100         * This is necessary even after a new worker was just successfully
2101         * created as @pool->lock was dropped and the new worker might have
2102         * already become busy.
2103         */
2104        if (need_to_create_worker(pool))
2105                goto restart;
2106}
2107
2108/**
2109 * manage_workers - manage worker pool
2110 * @worker: self
2111 *
2112 * Assume the manager role and manage the worker pool @worker belongs
2113 * to.  At any given time, there can be only zero or one manager per
2114 * pool.  The exclusion is handled automatically by this function.
2115 *
2116 * The caller can safely start processing works on false return.  On
2117 * true return, it's guaranteed that need_to_create_worker() is false
2118 * and may_start_working() is true.
2119 *
2120 * CONTEXT:
2121 * spin_lock_irq(pool->lock) which may be released and regrabbed
2122 * multiple times.  Does GFP_KERNEL allocations.
2123 *
2124 * Return:
2125 * %false if the pool doesn't need management and the caller can safely
2126 * start processing works, %true if management function was performed and
2127 * the conditions that the caller verified before calling the function may
2128 * no longer be true.
2129 */
2130static bool manage_workers(struct worker *worker)
2131{
2132        struct worker_pool *pool = worker->pool;
2133
2134        if (pool->flags & POOL_MANAGER_ACTIVE)
2135                return false;
2136
2137        pool->flags |= POOL_MANAGER_ACTIVE;
2138        pool->manager = worker;
2139
2140        maybe_create_worker(pool);
2141
2142        pool->manager = NULL;
2143        pool->flags &= ~POOL_MANAGER_ACTIVE;
2144        wake_up(&wq_manager_wait);
2145        return true;
2146}
2147
2148/**
2149 * process_one_work - process single work
2150 * @worker: self
2151 * @work: work to process
2152 *
2153 * Process @work.  This function contains all the logic necessary to
2154 * process a single work including synchronization against and
2155 * interaction with other workers on the same cpu, queueing and
2156 * flushing.  As long as the context requirement is met, any worker can
2157 * call this function to process a work.
2158 *
2159 * CONTEXT:
2160 * spin_lock_irq(pool->lock) which is released and regrabbed.
2161 */
2162static void process_one_work(struct worker *worker, struct work_struct *work)
2163__releases(&pool->lock)
2164__acquires(&pool->lock)
2165{
2166        struct pool_workqueue *pwq = get_work_pwq(work);
2167        struct worker_pool *pool = worker->pool;
2168        bool cpu_intensive = pwq->wq->flags & WQ_CPU_INTENSIVE;
2169        int work_color;
2170        struct worker *collision;
2171#ifdef CONFIG_LOCKDEP
2172        /*
2173         * It is permissible to free the struct work_struct from
2174         * inside the function that is called from it, this we need to
2175         * take into account for lockdep too.  To avoid bogus "held
2176         * lock freed" warnings as well as problems when looking into
2177         * work->lockdep_map, make a copy and use that here.
2178         */
2179        struct lockdep_map lockdep_map;
2180
2181        lockdep_copy_map(&lockdep_map, &work->lockdep_map);
2182#endif
2183        /* ensure we're on the correct CPU */
2184        WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
2185                     raw_smp_processor_id() != pool->cpu);
2186
2187        /*
2188         * A single work shouldn't be executed concurrently by
2189         * multiple workers on a single cpu.  Check whether anyone is
2190         * already processing the work.  If so, defer the work to the
2191         * currently executing one.
2192         */
2193        collision = find_worker_executing_work(pool, work);
2194        if (unlikely(collision)) {
2195                move_linked_works(work, &collision->scheduled, NULL);
2196                return;
2197        }
2198
2199        /* claim and dequeue */
2200        debug_work_deactivate(work);
2201        hash_add(pool->busy_hash, &worker->hentry, (unsigned long)work);
2202        worker->current_work = work;
2203        worker->current_func = work->func;
2204        worker->current_pwq = pwq;
2205        work_color = get_work_color(work);
2206
2207        /*
2208         * Record wq name for cmdline and debug reporting, may get
2209         * overridden through set_worker_desc().
2210         */
2211        strscpy(worker->desc, pwq->wq->name, WORKER_DESC_LEN);
2212
2213        list_del_init(&work->entry);
2214
2215        /*
2216         * CPU intensive works don't participate in concurrency management.
2217         * They're the scheduler's responsibility.  This takes @worker out
2218         * of concurrency management and the next code block will chain
2219         * execution of the pending work items.
2220         */
2221        if (unlikely(cpu_intensive))
2222                worker_set_flags(worker, WORKER_CPU_INTENSIVE);
2223
2224        /*
2225         * Wake up another worker if necessary.  The condition is always
2226         * false for normal per-cpu workers since nr_running would always
2227         * be >= 1 at this point.  This is used to chain execution of the
2228         * pending work items for WORKER_NOT_RUNNING workers such as the
2229         * UNBOUND and CPU_INTENSIVE ones.
2230         */
2231        if (need_more_worker(pool))
2232                wake_up_worker(pool);
2233
2234        /*
2235         * Record the last pool and clear PENDING which should be the last
2236         * update to @work.  Also, do this inside @pool->lock so that
2237         * PENDING and queued state changes happen together while IRQ is
2238         * disabled.
2239         */
2240        set_work_pool_and_clear_pending(work, pool->id);
2241
2242        spin_unlock_irq(&pool->lock);
2243
2244        lock_map_acquire(&pwq->wq->lockdep_map);
2245        lock_map_acquire(&lockdep_map);
2246        /*
2247         * Strictly speaking we should mark the invariant state without holding
2248         * any locks, that is, before these two lock_map_acquire()'s.
2249         *
2250         * However, that would result in:
2251         *
2252         *   A(W1)
2253         *   WFC(C)
2254         *              A(W1)
2255         *              C(C)
2256         *
2257         * Which would create W1->C->W1 dependencies, even though there is no
2258         * actual deadlock possible. There are two solutions, using a
2259         * read-recursive acquire on the work(queue) 'locks', but this will then
2260         * hit the lockdep limitation on recursive locks, or simply discard
2261         * these locks.
2262         *
2263         * AFAICT there is no possible deadlock scenario between the
2264         * flush_work() and complete() primitives (except for single-threaded
2265         * workqueues), so hiding them isn't a problem.
2266         */
2267        lockdep_invariant_state(true);
2268        trace_workqueue_execute_start(work);
2269        worker->current_func(work);
2270        /*
2271         * While we must be careful to not use "work" after this, the trace
2272         * point will only record its address.
2273         */
2274        trace_workqueue_execute_end(work);
2275        lock_map_release(&lockdep_map);
2276        lock_map_release(&pwq->wq->lockdep_map);
2277
2278        if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
2279                pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d\n"
2280                       "     last function: %ps\n",
2281                       current->comm, preempt_count(), task_pid_nr(current),
2282                       worker->current_func);
2283                debug_show_held_locks(current);
2284                dump_stack();
2285        }
2286
2287        /*
2288         * The following prevents a kworker from hogging CPU on !PREEMPT
2289         * kernels, where a requeueing work item waiting for something to
2290         * happen could deadlock with stop_machine as such work item could
2291         * indefinitely requeue itself while all other CPUs are trapped in
2292         * stop_machine. At the same time, report a quiescent RCU state so
2293         * the same condition doesn't freeze RCU.
2294         */
2295        cond_resched();
2296
2297        spin_lock_irq(&pool->lock);
2298
2299        /* clear cpu intensive status */
2300        if (unlikely(cpu_intensive))
2301                worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
2302
2303        /* tag the worker for identification in schedule() */
2304        worker->last_func = worker->current_func;
2305
2306        /* we're done with it, release */
2307        hash_del(&worker->hentry);
2308        worker->current_work = NULL;
2309        worker->current_func = NULL;
2310        worker->current_pwq = NULL;
2311        pwq_dec_nr_in_flight(pwq, work_color);
2312}
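
/*
 * Illustrative sketch: the WQ_CPU_INTENSIVE handling above is what makes
 * a workqueue like this safe for long-running, CPU-bound work items -
 * they stop counting toward concurrency management while running.
 * crypt_wq is a made-up example name.
 */
#include <linux/workqueue.h>

static struct workqueue_struct *crypt_wq;

static int crypt_setup(void)
{
	crypt_wq = alloc_workqueue("crypt", WQ_CPU_INTENSIVE, 0);
	return crypt_wq ? 0 : -ENOMEM;
}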
2313
2314/**
2315 * process_scheduled_works - process scheduled works
2316 * @worker: self
2317 *
2318 * Process all scheduled works.  Please note that the scheduled list
2319 * may change while processing a work, so this function repeatedly
2320 * fetches a work from the top and executes it.
2321 *
2322 * CONTEXT:
2323 * spin_lock_irq(pool->lock) which may be released and regrabbed
2324 * multiple times.
2325 */
2326static void process_scheduled_works(struct worker *worker)
2327{
2328        while (!list_empty(&worker->scheduled)) {
2329                struct work_struct *work = list_first_entry(&worker->scheduled,
2330                                                struct work_struct, entry);
2331                process_one_work(worker, work);
2332        }
2333}
2334
2335static void set_pf_worker(bool val)
2336{
2337        mutex_lock(&wq_pool_attach_mutex);
2338        if (val)
2339                current->flags |= PF_WQ_WORKER;
2340        else
2341                current->flags &= ~PF_WQ_WORKER;
2342        mutex_unlock(&wq_pool_attach_mutex);
2343}
2344
2345/**
2346 * worker_thread - the worker thread function
2347 * @__worker: self
2348 *
2349 * The worker thread function.  All workers belong to a worker_pool -
2350 * either a per-cpu one or dynamic unbound one.  These workers process all
2351 * work items regardless of their specific target workqueue.  The only
2352 * exception is work items which belong to workqueues with a rescuer which
2353 * will be explained in rescuer_thread().
2354 *
2355 * Return: 0
2356 */
2357static int worker_thread(void *__worker)
2358{
2359        struct worker *worker = __worker;
2360        struct worker_pool *pool = worker->pool;
2361
2362        /* tell the scheduler that this is a workqueue worker */
2363        set_pf_worker(true);
2364woke_up:
2365        spin_lock_irq(&pool->lock);
2366
2367        /* am I supposed to die? */
2368        if (unlikely(worker->flags & WORKER_DIE)) {
2369                spin_unlock_irq(&pool->lock);
2370                WARN_ON_ONCE(!list_empty(&worker->entry));
2371                set_pf_worker(false);
2372
2373                set_task_comm(worker->task, "kworker/dying");
2374                ida_simple_remove(&pool->worker_ida, worker->id);
2375                worker_detach_from_pool(worker);
2376                kfree(worker);
2377                return 0;
2378        }
2379
2380        worker_leave_idle(worker);
2381recheck:
2382        /* no more worker necessary? */
2383        if (!need_more_worker(pool))
2384                goto sleep;
2385
2386        /* do we need to manage? */
2387        if (unlikely(!may_start_working(pool)) && manage_workers(worker))
2388                goto recheck;
2389
2390        /*
2391         * ->scheduled list can only be filled while a worker is
2392         * preparing to process a work or actually processing it.
2393         * Make sure nobody diddled with it while I was sleeping.
2394         */
2395        WARN_ON_ONCE(!list_empty(&worker->scheduled));
2396
2397        /*
2398         * Finish PREP stage.  We're guaranteed to have at least one idle
2399         * worker or that someone else has already assumed the manager
2400         * role.  This is where @worker starts participating in concurrency
2401         * management if applicable and concurrency management is restored
2402         * after being rebound.  See rebind_workers() for details.
2403         */
2404        worker_clr_flags(worker, WORKER_PREP | WORKER_REBOUND);
2405
2406        do {
2407                struct work_struct *work =
2408                        list_first_entry(&pool->worklist,
2409                                         struct work_struct, entry);
2410
2411                pool->watchdog_ts = jiffies;
2412
2413                if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
2414                        /* optimization path, not strictly necessary */
2415                        process_one_work(worker, work);
2416                        if (unlikely(!list_empty(&worker->scheduled)))
2417                                process_scheduled_works(worker);
2418                } else {
2419                        move_linked_works(work, &worker->scheduled, NULL);
2420                        process_scheduled_works(worker);
2421                }
2422        } while (keep_working(pool));
2423
2424        worker_set_flags(worker, WORKER_PREP);
2425sleep:
2426        /*
2427         * pool->lock is held and there's no work to process and no need to
2428         * manage, sleep.  Workers are woken up only while holding
2429         * pool->lock or from local cpu, so setting the current state
2430         * before releasing pool->lock is enough to prevent losing any
2431         * event.
2432         */
2433        worker_enter_idle(worker);
2434        __set_current_state(TASK_IDLE);
2435        spin_unlock_irq(&pool->lock);
2436        schedule();
2437        goto woke_up;
2438}
2439
2440/**
2441 * rescuer_thread - the rescuer thread function
2442 * @__rescuer: self
2443 *
2444 * Workqueue rescuer thread function.  There's one rescuer for each
2445 * workqueue which has WQ_MEM_RECLAIM set.
2446 *
2447 * Regular work processing on a pool may block trying to create a new
2448 * worker, which uses a GFP_KERNEL allocation that has a slight chance
2449 * of developing into a deadlock if some works currently on the same
2450 * queue need to be processed to satisfy the GFP_KERNEL allocation.
2451 * This is the problem the rescuer solves.
2452 *
2453 * When such a condition is possible, the pool summons the rescuers of
2454 * all workqueues which have works queued on the pool and lets them
2455 * process those works so that forward progress can be guaranteed.
2456 *
2457 * This should happen rarely.
2458 *
2459 * Return: 0
2460 */
2461static int rescuer_thread(void *__rescuer)
2462{
2463        struct worker *rescuer = __rescuer;
2464        struct workqueue_struct *wq = rescuer->rescue_wq;
2465        struct list_head *scheduled = &rescuer->scheduled;
2466        bool should_stop;
2467
2468        set_user_nice(current, RESCUER_NICE_LEVEL);
2469
2470        /*
2471         * Mark rescuer as worker too.  As WORKER_PREP is never cleared, it
2472         * doesn't participate in concurrency management.
2473         */
2474        set_pf_worker(true);
2475repeat:
2476        set_current_state(TASK_IDLE);
2477
2478        /*
2479         * By the time the rescuer is requested to stop, the workqueue
2480         * shouldn't have any work pending, but @wq->maydays may still have
2481         * pwq(s) queued.  This can happen by non-rescuer workers consuming
2482         * all the work items before the rescuer got to them.  Go through
2483         * @wq->maydays processing before acting on should_stop so that the
2484         * list is always empty on exit.
2485         */
2486        should_stop = kthread_should_stop();
2487
2488        /* see whether any pwq is asking for help */
2489        spin_lock_irq(&wq_mayday_lock);
2490
2491        while (!list_empty(&wq->maydays)) {
2492                struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
2493                                        struct pool_workqueue, mayday_node);
2494                struct worker_pool *pool = pwq->pool;
2495                struct work_struct *work, *n;
2496                bool first = true;
2497
2498                __set_current_state(TASK_RUNNING);
2499                list_del_init(&pwq->mayday_node);
2500
2501                spin_unlock_irq(&wq_mayday_lock);
2502
2503                worker_attach_to_pool(rescuer, pool);
2504
2505                spin_lock_irq(&pool->lock);
2506
2507                /*
2508                 * Slurp in all works issued via this workqueue and
2509                 * process'em.
2510                 */
2511                WARN_ON_ONCE(!list_empty(scheduled));
2512                list_for_each_entry_safe(work, n, &pool->worklist, entry) {
2513                        if (get_work_pwq(work) == pwq) {
2514                                if (first)
2515                                        pool->watchdog_ts = jiffies;
2516                                move_linked_works(work, scheduled, &n);
2517                        }
2518                        first = false;
2519                }
2520
2521                if (!list_empty(scheduled)) {
2522                        process_scheduled_works(rescuer);
2523
2524                        /*
2525                         * The above execution of rescued work items could
2526                         * have created more to rescue through
2527                         * pwq_activate_first_delayed() or chained
2528                         * queueing.  Let's put @pwq back on mayday list so
2529                         * that such back-to-back work items, which may be
2530                         * being used to relieve memory pressure, don't
2531                         * incur MAYDAY_INTERVAL delay in between.
2532                         */
2533                        if (need_to_create_worker(pool)) {
2534                                spin_lock(&wq_mayday_lock);
2535                                get_pwq(pwq);
2536                                list_move_tail(&pwq->mayday_node, &wq->maydays);
2537                                spin_unlock(&wq_mayday_lock);
2538                        }
2539                }
2540
2541                /*
2542                 * Put the reference grabbed by send_mayday().  @pool won't
2543                 * go away while we're still attached to it.
2544                 */
2545                put_pwq(pwq);
2546
2547                /*
2548                 * Leave this pool.  If need_more_worker() is %true, notify a
2549                 * regular worker; otherwise, we end up with 0 concurrency
2550                 * and stalling the execution.
2551                 */
2552                if (need_more_worker(pool))
2553                        wake_up_worker(pool);
2554
2555                spin_unlock_irq(&pool->lock);
2556
2557                worker_detach_from_pool(rescuer);
2558
2559                spin_lock_irq(&wq_mayday_lock);
2560        }
2561
2562        spin_unlock_irq(&wq_mayday_lock);
2563
2564        if (should_stop) {
2565                __set_current_state(TASK_RUNNING);
2566                set_pf_worker(false);
2567                return 0;
2568        }
2569
2570        /* rescuers should never participate in concurrency management */
2571        WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
2572        schedule();
2573        goto repeat;
2574}
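
/*
 * Illustrative sketch: a workqueue on the memory-reclaim path gets a
 * rescuer of its own by passing WQ_MEM_RECLAIM, so the scenario above
 * (worker creation stuck in allocation) can't stall it indefinitely.
 * my_writeback_wq is a made-up example name.
 */
#include <linux/workqueue.h>

static struct workqueue_struct *my_writeback_wq;

static int setup_reclaim_wq(void)
{
	my_writeback_wq = alloc_workqueue("my_writeback",
					  WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
	return my_writeback_wq ? 0 : -ENOMEM;
}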
2575
2576/**
2577 * check_flush_dependency - check for flush dependency sanity
2578 * @target_wq: workqueue being flushed
2579 * @target_work: work item being flushed (NULL for workqueue flushes)
2580 *
2581 * %current is trying to flush the whole @target_wq or @target_work on it.
2582 * If @target_wq doesn't have %WQ_MEM_RECLAIM, verify that %current is not
2583 * reclaiming memory or running on a workqueue which doesn't have
2584 * %WQ_MEM_RECLAIM as that can break forward-progress guarantee leading to
2585 * a deadlock.
2586 */
2587static void check_flush_dependency(struct workqueue_struct *target_wq,
2588                                   struct work_struct *target_work)
2589{
2590        work_func_t target_func = target_work ? target_work->func : NULL;
2591        struct worker *worker;
2592
2593        if (target_wq->flags & WQ_MEM_RECLAIM)
2594                return;
2595
2596        worker = current_wq_worker();
2597
2598        WARN_ONCE(current->flags & PF_MEMALLOC,
2599                  "workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%ps",
2600                  current->pid, current->comm, target_wq->name, target_func);
2601        WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
2602                              (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM),
2603                  "workqueue: WQ_MEM_RECLAIM %s:%ps is flushing !WQ_MEM_RECLAIM %s:%ps",
2604                  worker->current_pwq->wq->name, worker->current_func,
2605                  target_wq->name, target_func);
2606}
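
/*
 * Illustrative sketch of the rule this check enforces: code that can run
 * under memory pressure must only flush works on WQ_MEM_RECLAIM
 * workqueues.  reclaim_wq/reclaim_work are made-up example names.
 */
#include <linux/workqueue.h>

static struct workqueue_struct *reclaim_wq;	/* created w/ WQ_MEM_RECLAIM */
static struct work_struct reclaim_work;		/* queued on reclaim_wq */

static void reclaim_path(void)
{
	/* OK: the target wq has a rescuer and can always make progress */
	flush_work(&reclaim_work);

	/*
	 * Flushing system_wq here instead would trip the WARN_ONCE()
	 * above when called with PF_MEMALLOC set.
	 */
}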
2607
2608struct wq_barrier {
2609        struct work_struct      work;
2610        struct completion       done;
2611        struct task_struct      *task;  /* purely informational */
2612};
2613
2614static void wq_barrier_func(struct work_struct *work)
2615{
2616        struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
2617        complete(&barr->done);
2618}
2619
2620/**
2621 * insert_wq_barrier - insert a barrier work
2622 * @pwq: pwq to insert barrier into
2623 * @barr: wq_barrier to insert
2624 * @target: target work to attach @barr to
2625 * @worker: worker currently executing @target, NULL if @target is not executing
2626 *
2627 * @barr is linked to @target such that @barr is completed only after
2628 * @target finishes execution.  Please note that the ordering
2629 * guarantee is observed only with respect to @target and on the local
2630 * cpu.
2631 *
2632 * Currently, a queued barrier can't be canceled.  This is because
2633 * try_to_grab_pending() can't determine whether the work to be
2634 * grabbed is at the head of the queue and thus can't clear the LINKED
2635 * flag of the previous work, while there must be a valid next work
2636 * after a work with the LINKED flag set.
2637 *
2638 * Note that when @worker is non-NULL, @target may be modified
2639 * underneath us, so we can't reliably determine pwq from @target.
2640 *
2641 * CONTEXT:
2642 * spin_lock_irq(pool->lock).
2643 */
2644static void insert_wq_barrier(struct pool_workqueue *pwq,
2645                              struct wq_barrier *barr,
2646                              struct work_struct *target, struct worker *worker)
2647{
2648        struct list_head *head;
2649        unsigned int linked = 0;
2650
2651        /*
2652         * debugobject calls are safe here even with pool->lock locked
2653         * as we know for sure that this will not trigger any of the
2654         * checks and call back into the fixup functions where we
2655         * might deadlock.
2656         */
2657        INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
2658        __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
2659
2660        init_completion_map(&barr->done, &target->lockdep_map);
2661
2662        barr->task = current;
2663
2664        /*
2665         * If @target is currently being executed, schedule the
2666         * barrier to the worker; otherwise, put it after @target.
2667         */
2668        if (worker)
2669                head = worker->scheduled.next;
2670        else {
2671                unsigned long *bits = work_data_bits(target);
2672
2673                head = target->entry.next;
2674                /* there can already be other linked works, inherit and set */
2675                linked = *bits & WORK_STRUCT_LINKED;
2676                __set_bit(WORK_STRUCT_LINKED_BIT, bits);
2677        }
2678
2679        debug_work_activate(&barr->work);
2680        insert_work(pwq, &barr->work, head,
2681                    work_color_to_flags(WORK_NO_COLOR) | linked);
2682}
2683
2684/**
2685 * flush_workqueue_prep_pwqs - prepare pwqs for workqueue flushing
2686 * @wq: workqueue being flushed
2687 * @flush_color: new flush color, < 0 for no-op
2688 * @work_color: new work color, < 0 for no-op
2689 *
2690 * Prepare pwqs for workqueue flushing.
2691 *
2692 * If @flush_color is non-negative, flush_color on all pwqs should be
2693 * -1.  If no pwq has in-flight commands at the specified color, all
2694 * pwq->flush_color's stay at -1 and %false is returned.  If any pwq
2695 * has in flight commands, its pwq->flush_color is set to
2696 * @flush_color, @wq->nr_pwqs_to_flush is updated accordingly, pwq
2697 * wakeup logic is armed and %true is returned.
2698 *
2699 * The caller should have initialized @wq->first_flusher prior to
2700 * calling this function with non-negative @flush_color.  If
2701 * @flush_color is negative, no flush color update is done and %false
2702 * is returned.
2703 *
2704 * If @work_color is non-negative, all pwqs should have the same
2705 * work_color which is previous to @work_color and all will be
2706 * advanced to @work_color.
2707 *
2708 * CONTEXT:
2709 * mutex_lock(wq->mutex).
2710 *
2711 * Return:
2712 * %true if @flush_color >= 0 and there's something to flush.  %false
2713 * otherwise.
2714 */
2715static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
2716                                      int flush_color, int work_color)
2717{
2718        bool wait = false;
2719        struct pool_workqueue *pwq;
2720
2721        if (flush_color >= 0) {
2722                WARN_ON_ONCE(atomic_read(&wq->nr_pwqs_to_flush));
2723                atomic_set(&wq->nr_pwqs_to_flush, 1);
2724        }
2725
2726        for_each_pwq(pwq, wq) {
2727                struct worker_pool *pool = pwq->pool;
2728
2729                spin_lock_irq(&pool->lock);
2730
2731                if (flush_color >= 0) {
2732                        WARN_ON_ONCE(pwq->flush_color != -1);
2733
2734                        if (pwq->nr_in_flight[flush_color]) {
2735                                pwq->flush_color = flush_color;
2736                                atomic_inc(&wq->nr_pwqs_to_flush);
2737                                wait = true;
2738                        }
2739                }
2740
2741                if (work_color >= 0) {
2742                        WARN_ON_ONCE(work_color != work_next_color(pwq->work_color));
2743                        pwq->work_color = work_color;
2744                }
2745
2746                spin_unlock_irq(&pool->lock);
2747        }
2748
2749        if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_pwqs_to_flush))
2750                complete(&wq->first_flusher->done);
2751
2752        return wait;
2753}
2754
2755/**
2756 * flush_workqueue - ensure that any scheduled work has run to completion.
2757 * @wq: workqueue to flush
2758 *
2759 * This function sleeps until all work items which were queued on entry
2760 * have finished execution, but it is not livelocked by new incoming ones.
2761 */
2762void flush_workqueue(struct workqueue_struct *wq)
2763{
2764        struct wq_flusher this_flusher = {
2765                .list = LIST_HEAD_INIT(this_flusher.list),
2766                .flush_color = -1,
2767                .done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
2768        };
2769        int next_color;
2770
2771        if (WARN_ON(!wq_online))
2772                return;
2773
2774        lock_map_acquire(&wq->lockdep_map);
2775        lock_map_release(&wq->lockdep_map);
2776
2777        mutex_lock(&wq->mutex);
2778
2779        /*
2780         * Start-to-wait phase
2781         */
2782        next_color = work_next_color(wq->work_color);
2783
2784        if (next_color != wq->flush_color) {
2785                /*
2786                 * Color space is not full.  The current work_color
2787                 * becomes our flush_color and work_color is advanced
2788                 * by one.
2789                 */
2790                WARN_ON_ONCE(!list_empty(&wq->flusher_overflow));
2791                this_flusher.flush_color = wq->work_color;
2792                wq->work_color = next_color;
2793
2794                if (!wq->first_flusher) {
2795                        /* no flush in progress, become the first flusher */
2796                        WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
2797
2798                        wq->first_flusher = &this_flusher;
2799
2800                        if (!flush_workqueue_prep_pwqs(wq, wq->flush_color,
2801                                                       wq->work_color)) {
2802                                /* nothing to flush, done */
2803                                wq->flush_color = next_color;
2804                                wq->first_flusher = NULL;
2805                                goto out_unlock;
2806                        }
2807                } else {
2808                        /* wait in queue */
2809                        WARN_ON_ONCE(wq->flush_color == this_flusher.flush_color);
2810                        list_add_tail(&this_flusher.list, &wq->flusher_queue);
2811                        flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
2812                }
2813        } else {
2814                /*
2815                 * Oops, color space is full, wait on overflow queue.
2816                 * The next flush completion will assign us
2817                 * flush_color and transfer to flusher_queue.
2818                 */
2819                list_add_tail(&this_flusher.list, &wq->flusher_overflow);
2820        }
2821
2822        check_flush_dependency(wq, NULL);
2823
2824        mutex_unlock(&wq->mutex);
2825
2826        wait_for_completion(&this_flusher.done);
2827
2828        /*
2829         * Wake-up-and-cascade phase
2830         *
2831         * First flushers are responsible for cascading flushes and
2832         * handling overflow.  Non-first flushers can simply return.
2833         */
2834        if (wq->first_flusher != &this_flusher)
2835                return;
2836
2837        mutex_lock(&wq->mutex);
2838
2839        /* we might have raced, check again with mutex held */
2840        if (wq->first_flusher != &this_flusher)
2841                goto out_unlock;
2842
2843        wq->first_flusher = NULL;
2844
2845        WARN_ON_ONCE(!list_empty(&this_flusher.list));
2846        WARN_ON_ONCE(wq->flush_color != this_flusher.flush_color);
2847
2848        while (true) {
2849                struct wq_flusher *next, *tmp;
2850
2851                /* complete all the flushers sharing the current flush color */
2852                list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
2853                        if (next->flush_color != wq->flush_color)
2854                                break;
2855                        list_del_init(&next->list);
2856                        complete(&next->done);
2857                }
2858
2859                WARN_ON_ONCE(!list_empty(&wq->flusher_overflow) &&
2860                             wq->flush_color != work_next_color(wq->work_color));
2861
2862                /* this flush_color is finished, advance by one */
2863                wq->flush_color = work_next_color(wq->flush_color);
2864
2865                /* one color has been freed, handle overflow queue */
2866                if (!list_empty(&wq->flusher_overflow)) {
2867                        /*
2868                         * Assign the same color to all overflowed
2869                         * flushers, advance work_color and append to
2870                         * flusher_queue.  This is the start-to-wait
2871                         * phase for these overflowed flushers.
2872                         */
2873                        list_for_each_entry(tmp, &wq->flusher_overflow, list)
2874                                tmp->flush_color = wq->work_color;
2875
2876                        wq->work_color = work_next_color(wq->work_color);
2877
2878                        list_splice_tail_init(&wq->flusher_overflow,
2879                                              &wq->flusher_queue);
2880                        flush_workqueue_prep_pwqs(wq, -1, wq->work_color);
2881                }
2882
2883                if (list_empty(&wq->flusher_queue)) {
2884                        WARN_ON_ONCE(wq->flush_color != wq->work_color);
2885                        break;
2886                }
2887
2888                /*
2889                 * Need to flush more colors.  Make the next flusher
2890                 * the new first flusher and arm pwqs.
2891                 */
2892                WARN_ON_ONCE(wq->flush_color == wq->work_color);
2893                WARN_ON_ONCE(wq->flush_color != next->flush_color);
2894
2895                list_del_init(&next->list);
2896                wq->first_flusher = next;
2897
2898                if (flush_workqueue_prep_pwqs(wq, wq->flush_color, -1))
2899                        break;
2900
2901                /*
2902                 * Meh... this color is already done, clear first
2903                 * flusher and repeat cascading.
2904                 */
2905                wq->first_flusher = NULL;
2906        }
2907
2908out_unlock:
2909        mutex_unlock(&wq->mutex);
2910}
2911EXPORT_SYMBOL(flush_workqueue);
2912
2913/**
2914 * drain_workqueue - drain a workqueue
2915 * @wq: workqueue to drain
2916 *
2917 * Wait until the workqueue becomes empty.  While draining is in progress,
2918 * only chain queueing is allowed.  IOW, only currently pending or running
2919 * work items on @wq can queue further work items on it.  @wq is flushed
2920 * repeatedly until it becomes empty.  The number of flushes is determined
2921 * by the depth of chaining and should be relatively short.  Whine if it
2922 * takes too long.
2923 */
2924void drain_workqueue(struct workqueue_struct *wq)
2925{
2926        unsigned int flush_cnt = 0;
2927        struct pool_workqueue *pwq;
2928
2929        /*
2930         * __queue_work() needs to test whether there are drainers; it is much
2931         * hotter than drain_workqueue() and already looks at @wq->flags.
2932         * Use __WQ_DRAINING so queueing paths don't have to check nr_drainers.
2933         */
2934        mutex_lock(&wq->mutex);
2935        if (!wq->nr_drainers++)
2936                wq->flags |= __WQ_DRAINING;
2937        mutex_unlock(&wq->mutex);
2938reflush:
2939        flush_workqueue(wq);
2940
2941        mutex_lock(&wq->mutex);
2942
2943        for_each_pwq(pwq, wq) {
2944                bool drained;
2945
2946                spin_lock_irq(&pwq->pool->lock);
2947                drained = !pwq->nr_active && list_empty(&pwq->delayed_works);
2948                spin_unlock_irq(&pwq->pool->lock);
2949
2950                if (drained)
2951                        continue;
2952
2953                if (++flush_cnt == 10 ||
2954                    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
2955                        pr_warn("workqueue %s: drain_workqueue() isn't complete after %u tries\n",
2956                                wq->name, flush_cnt);
2957
2958                mutex_unlock(&wq->mutex);
2959                goto reflush;
2960        }
2961
2962        if (!--wq->nr_drainers)
2963                wq->flags &= ~__WQ_DRAINING;
2964        mutex_unlock(&wq->mutex);
2965}
2966EXPORT_SYMBOL_GPL(drain_workqueue);
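
/*
 * Illustrative sketch of the chain-queueing rule above; my_wq, my_work_fn()
 * and more_to_do() are hypothetical names, not part of this file.  While
 * drain_workqueue() is in progress a pending work item may requeue itself
 * on the same workqueue, and the drain simply reflushes until the chain
 * terminates:
 *
 *	static struct workqueue_struct *my_wq;
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		if (more_to_do())
 *			queue_work(my_wq, work);	// chain queueing is allowed
 *	}
 *
 *	drain_workqueue(my_wq);		// returns once the chain stops requeueing
 */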
2967
2968static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
2969                             bool from_cancel)
2970{
2971        struct worker *worker = NULL;
2972        struct worker_pool *pool;
2973        struct pool_workqueue *pwq;
2974
2975        might_sleep();
2976
2977        rcu_read_lock();
2978        pool = get_work_pool(work);
2979        if (!pool) {
2980                rcu_read_unlock();
2981                return false;
2982        }
2983
2984        spin_lock_irq(&pool->lock);
2985        /* see the comment in try_to_grab_pending() with the same code */
2986        pwq = get_work_pwq(work);
2987        if (pwq) {
2988                if (unlikely(pwq->pool != pool))
2989                        goto already_gone;
2990        } else {
2991                worker = find_worker_executing_work(pool, work);
2992                if (!worker)
2993                        goto already_gone;
2994                pwq = worker->current_pwq;
2995        }
2996
2997        check_flush_dependency(pwq->wq, work);
2998
2999        insert_wq_barrier(pwq, barr, work, worker);
3000        spin_unlock_irq(&pool->lock);
3001
3002        /*
3003         * Force a lock recursion deadlock when using flush_work() inside a
3004         * single-threaded or rescuer equipped workqueue.
3005         *
3006         * For single threaded workqueues the deadlock happens when the flushed
3007         * work is queued after the work issuing the flush_work().  For rescuer
3008         * equipped workqueues the deadlock happens when the rescuer stalls,
3009         * blocking forward progress.
3010         */
3011        if (!from_cancel &&
3012            (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) {
3013                lock_map_acquire(&pwq->wq->lockdep_map);
3014                lock_map_release(&pwq->wq->lockdep_map);
3015        }
3016        rcu_read_unlock();
3017        return true;
3018already_gone:
3019        spin_unlock_irq(&pool->lock);
3020        rcu_read_unlock();
3021        return false;
3022}
3023
3024static bool __flush_work(struct work_struct *work, bool from_cancel)
3025{
3026        struct wq_barrier barr;
3027
3028        if (WARN_ON(!wq_online))
3029                return false;
3030
3031        if (WARN_ON(!work->func))
3032                return false;
3033
3034        if (!from_cancel) {
3035                lock_map_acquire(&work->lockdep_map);
3036                lock_map_release(&work->lockdep_map);
3037        }
3038
3039        if (start_flush_work(work, &barr, from_cancel)) {
3040                wait_for_completion(&barr.done);
3041                destroy_work_on_stack(&barr.work);
3042                return true;
3043        } else {
3044                return false;
3045        }
3046}
3047
3048/**
3049 * flush_work - wait for a work to finish executing the last queueing instance
3050 * @work: the work to flush
3051 *
3052 * Wait until @work has finished execution.  @work is guaranteed to be idle
3053 * on return if it hasn't been requeued since flush started.
3054 *
3055 * Return:
3056 * %true if flush_work() waited for the work to finish execution,
3057 * %false if it was already idle.
3058 */
3059bool flush_work(struct work_struct *work)
3060{
3061        return __flush_work(work, false);
3062}
3063EXPORT_SYMBOL_GPL(flush_work);
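
/*
 * A minimal usage sketch; struct my_dev and my_dev_release() are assumed
 * for illustration.  flush_work() only waits for the last queueing
 * instance, so this is a safe barrier before freeing the embedding object
 * provided nothing can requeue the item afterwards:
 *
 *	struct my_dev {
 *		struct work_struct reset_work;
 *	};
 *
 *	static void my_dev_release(struct my_dev *dev)
 *	{
 *		flush_work(&dev->reset_work);	// wait, but don't cancel
 *		kfree(dev);			// idle now, safe to free
 *	}
 */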
3064
3065struct cwt_wait {
3066        wait_queue_entry_t              wait;
3067        struct work_struct      *work;
3068};
3069
3070static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
3071{
3072        struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
3073
3074        if (cwait->work != key)
3075                return 0;
3076        return autoremove_wake_function(wait, mode, sync, key);
3077}
3078
3079static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
3080{
3081        static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
3082        unsigned long flags;
3083        int ret;
3084
3085        do {
3086                ret = try_to_grab_pending(work, is_dwork, &flags);
3087                /*
3088                 * If someone else is already canceling, wait for it to
3089                 * finish.  flush_work() doesn't work for PREEMPT_NONE
3090                 * because we may get scheduled between @work's completion
3091                 * and the other canceling task resuming and clearing
3092                 * CANCELING - flush_work() will return false immediately
3093                 * as @work is no longer busy, try_to_grab_pending() will
3094                 * return -ENOENT as @work is still being canceled and the
3095                 * other canceling task won't be able to clear CANCELING as
3096                 * we're hogging the CPU.
3097                 *
3098                 * Let's wait for completion using a waitqueue.  As this
3099                 * may lead to the thundering herd problem, use a custom
3100                 * wake function which matches @work along with exclusive
3101                 * wait and wakeup.
3102                 */
3103                if (unlikely(ret == -ENOENT)) {
3104                        struct cwt_wait cwait;
3105
3106                        init_wait(&cwait.wait);
3107                        cwait.wait.func = cwt_wakefn;
3108                        cwait.work = work;
3109
3110                        prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
3111                                                  TASK_UNINTERRUPTIBLE);
3112                        if (work_is_canceling(work))
3113                                schedule();
3114                        finish_wait(&cancel_waitq, &cwait.wait);
3115                }
3116        } while (unlikely(ret < 0));
3117
3118        /* tell other tasks trying to grab @work to back off */
3119        mark_work_canceling(work);
3120        local_irq_restore(flags);
3121
3122        /*
3123         * This allows canceling during early boot.  We know that @work
3124         * isn't executing.
3125         */
3126        if (wq_online)
3127                __flush_work(work, true);
3128
3129        clear_work_data(work);
3130
3131        /*
3132         * Paired with prepare_to_wait() above so that either
3133         * waitqueue_active() is visible here or !work_is_canceling() is
3134         * visible there.
3135         */
3136        smp_mb();
3137        if (waitqueue_active(&cancel_waitq))
3138                __wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
3139
3140        return ret;
3141}
3142
3143/**
3144 * cancel_work_sync - cancel a work and wait for it to finish
3145 * @work: the work to cancel
3146 *
3147 * Cancel @work and wait for its execution to finish.  This function
3148 * can be used even if the work re-queues itself or migrates to
3149 * another workqueue.  On return from this function, @work is
3150 * guaranteed to be not pending or executing on any CPU.
3151 *
3152 * cancel_work_sync(&delayed_work->work) must not be used for
3153 * delayed work items.  Use cancel_delayed_work_sync() instead.
3154 *
3155 * The caller must ensure that the workqueue on which @work was last
3156 * queued can't be destroyed before this function returns.
3157 *
3158 * Return:
3159 * %true if @work was pending, %false otherwise.
3160 */
3161bool cancel_work_sync(struct work_struct *work)
3162{
3163        return __cancel_work_timer(work, false);
3164}
3165EXPORT_SYMBOL_GPL(cancel_work_sync);
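
/*
 * Sketch of the self-requeueing case the comment above allows; poll_work,
 * poll_fn() and do_poll() are illustrative only:
 *
 *	static void poll_fn(struct work_struct *work)
 *	{
 *		do_poll();
 *		schedule_work(work);		// re-arms itself every run
 *	}
 *	static DECLARE_WORK(poll_work, poll_fn);
 *
 *	cancel_work_sync(&poll_work);	// neither pending nor running afterwards
 */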
3166
3167/**
3168 * flush_delayed_work - wait for a dwork to finish executing the last queueing instance
3169 * @dwork: the delayed work to flush
3170 *
3171 * Delayed timer is cancelled and the pending work is queued for
3172 * immediate execution.  Like flush_work(), this function only
3173 * considers the last queueing instance of @dwork.
3174 *
3175 * Return:
3176 * %true if flush_work() waited for the work to finish execution,
3177 * %false if it was already idle.
3178 */
3179bool flush_delayed_work(struct delayed_work *dwork)
3180{
3181        local_irq_disable();
3182        if (del_timer_sync(&dwork->timer))
3183                __queue_work(dwork->cpu, dwork->wq, &dwork->work);
3184        local_irq_enable();
3185        return flush_work(&dwork->work);
3186}
3187EXPORT_SYMBOL(flush_delayed_work);
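
/*
 * Sketch (sync_dwork and sync_fn are assumptions): an armed timer is
 * pulled forward so the pending item executes immediately, then we wait
 * for it:
 *
 *	static DECLARE_DELAYED_WORK(sync_dwork, sync_fn);
 *
 *	queue_delayed_work(system_wq, &sync_dwork, HZ);
 *	...
 *	if (flush_delayed_work(&sync_dwork))
 *		pr_debug("waited for sync_fn to finish\n");
 */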
3188
3189/**
3190 * flush_rcu_work - wait for a rwork to finish executing the last queueing instance
3191 * @rwork: the rcu work to flush
3192 *
3193 * Return:
3194 * %true if flush_rcu_work() waited for the work to finish execution,
3195 * %false if it was already idle.
3196 */
3197bool flush_rcu_work(struct rcu_work *rwork)
3198{
3199        if (test_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&rwork->work))) {
3200                rcu_barrier();
3201                flush_work(&rwork->work);
3202                return true;
3203        } else {
3204                return flush_work(&rwork->work);
3205        }
3206}
3207EXPORT_SYMBOL(flush_rcu_work);
3208
3209static bool __cancel_work(struct work_struct *work, bool is_dwork)
3210{
3211        unsigned long flags;
3212        int ret;
3213
3214        do {
3215                ret = try_to_grab_pending(work, is_dwork, &flags);
3216        } while (unlikely(ret == -EAGAIN));
3217
3218        if (unlikely(ret < 0))
3219                return false;
3220
3221        set_work_pool_and_clear_pending(work, get_work_pool_id(work));
3222        local_irq_restore(flags);
3223        return ret;
3224}
3225
3226/**
3227 * cancel_delayed_work - cancel a delayed work
3228 * @dwork: delayed_work to cancel
3229 *
3230 * Kill off a pending delayed_work.
3231 *
3232 * Return: %true if @dwork was pending and canceled; %false if it wasn't
3233 * pending.
3234 *
3235 * Note:
3236 * The work callback function may still be running on return, unless
3237 * this function returns %true and the work doesn't re-arm itself.
3238 * Explicitly flush or use cancel_delayed_work_sync() to wait on it.
3239 *
3240 * This function is safe to call from any context including IRQ handler.
3241 */
3242bool cancel_delayed_work(struct delayed_work *dwork)
3243{
3244        return __cancel_work(&dwork->work, true);
3245}
3246EXPORT_SYMBOL(cancel_delayed_work);
3247
3248/**
3249 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
3250 * @dwork: the delayed work to cancel
3251 *
3252 * This is cancel_work_sync() for delayed works.
3253 *
3254 * Return:
3255 * %true if @dwork was pending, %false otherwise.
3256 */
3257bool cancel_delayed_work_sync(struct delayed_work *dwork)
3258{
3259        return __cancel_work_timer(&dwork->work, true);
3260}
3261EXPORT_SYMBOL(cancel_delayed_work_sync);
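
/*
 * Sketch of the usual shutdown pairing for a self re-arming poll loop;
 * poll_dwork, my_poll_fn() and sample_hw() are illustrative names:
 *
 *	static void my_poll_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *
 *		sample_hw();
 *		queue_delayed_work(system_wq, dwork, HZ);	// re-arm
 *	}
 *
 *	cancel_delayed_work_sync(&poll_dwork);	// timer dead, fn not running
 */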
3262
3263/**
3264 * schedule_on_each_cpu - execute a function synchronously on each online CPU
3265 * @func: the function to call
3266 *
3267 * schedule_on_each_cpu() executes @func on each online CPU using the
3268 * system workqueue and blocks until all CPUs have completed.
3269 * schedule_on_each_cpu() is very slow.
3270 *
3271 * Return:
3272 * 0 on success, -errno on failure.
3273 */
3274int schedule_on_each_cpu(work_func_t func)
3275{
3276        int cpu;
3277        struct work_struct __percpu *works;
3278
3279        works = alloc_percpu(struct work_struct);
3280        if (!works)
3281                return -ENOMEM;
3282
3283        get_online_cpus();
3284
3285        for_each_online_cpu(cpu) {
3286                struct work_struct *work = per_cpu_ptr(works, cpu);
3287
3288                INIT_WORK(work, func);
3289                schedule_work_on(cpu, work);
3290        }
3291
3292        for_each_online_cpu(cpu)
3293                flush_work(per_cpu_ptr(works, cpu));
3294
3295        put_online_cpus();
3296        free_percpu(works);
3297        return 0;
3298}
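
/*
 * Sketch: resetting a per-cpu statistic on every online CPU; my_stat and
 * reset_stat() are assumptions for illustration:
 *
 *	static DEFINE_PER_CPU(unsigned long, my_stat);
 *
 *	static void reset_stat(struct work_struct *work)
 *	{
 *		this_cpu_write(my_stat, 0);	// runs on each online CPU in turn
 *	}
 *
 *	int err = schedule_on_each_cpu(reset_stat);
 */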
3299
3300/**
3301 * execute_in_process_context - reliably execute the routine with user context
3302 * @fn:         the function to execute
3303 * @ew:         guaranteed storage for the execute work structure (must
3304 *              be available when the work executes)
3305 *
3306 * Executes the function immediately if process context is available,
3307 * otherwise schedules the function for delayed execution.
3308 *
3309 * Return:      0 - function was executed
3310 *              1 - function was scheduled for execution
3311 */
3312int execute_in_process_context(work_func_t fn, struct execute_work *ew)
3313{
3314        if (!in_interrupt()) {
3315                fn(&ew->work);
3316                return 0;
3317        }
3318
3319        INIT_WORK(&ew->work, fn);
3320        schedule_work(&ew->work);
3321
3322        return 1;
3323}
3324EXPORT_SYMBOL_GPL(execute_in_process_context);
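
/*
 * Sketch: freeing an object from a path that may run in IRQ context;
 * struct my_obj is an assumption.  Embedding the execute_work keeps the
 * @ew storage valid until the deferred work runs:
 *
 *	struct my_obj {
 *		struct execute_work ew;
 *	};
 *
 *	static void my_obj_free(struct work_struct *work)
 *	{
 *		kfree(container_of(work, struct my_obj, ew.work));
 *	}
 *
 *	execute_in_process_context(my_obj_free, &obj->ew);
 */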
3325
3326/**
3327 * free_workqueue_attrs - free a workqueue_attrs
3328 * @attrs: workqueue_attrs to free
3329 *
3330 * Undo alloc_workqueue_attrs().
3331 */
3332void free_workqueue_attrs(struct workqueue_attrs *attrs)
3333{
3334        if (attrs) {
3335                free_cpumask_var(attrs->cpumask);
3336                kfree(attrs);
3337        }
3338}
3339
3340/**
3341 * alloc_workqueue_attrs - allocate a workqueue_attrs
3342 *
3343 * Allocate a new workqueue_attrs, initialize with default settings and
3344 * return it.
3345 *
3346 * Return: The allocated new workqueue_attrs on success. %NULL on failure.
3347 */
3348struct workqueue_attrs *alloc_workqueue_attrs(void)
3349{
3350        struct workqueue_attrs *attrs;
3351
3352        attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
3353        if (!attrs)
3354                goto fail;
3355        if (!alloc_cpumask_var(&attrs->cpumask, GFP_KERNEL))
3356                goto fail;
3357
3358        cpumask_copy(attrs->cpumask, cpu_possible_mask);
3359        return attrs;
3360fail:
3361        free_workqueue_attrs(attrs);
3362        return NULL;
3363}
3364
3365static void copy_workqueue_attrs(struct workqueue_attrs *to,
3366                                 const struct workqueue_attrs *from)
3367{
3368        to->nice = from->nice;
3369        cpumask_copy(to->cpumask, from->cpumask);
3370        /*
3371         * Unlike hash and equality test, this function doesn't ignore
3372         * ->no_numa as it is used for both pool and wq attrs.  Instead,
3373         * get_unbound_pool() explicitly clears ->no_numa after copying.
3374         */
3375        to->no_numa = from->no_numa;
3376}
3377
3378/* hash value of the content of @attrs */
3379static u32 wqattrs_hash(const struct workqueue_attrs *attrs)
3380{
3381        u32 hash = 0;
3382
3383        hash = jhash_1word(attrs->nice, hash);
3384        hash = jhash(cpumask_bits(attrs->cpumask),
3385                     BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long), hash);
3386        return hash;
3387}
3388
3389/* content equality test */
3390static bool wqattrs_equal(const struct workqueue_attrs *a,
3391                          const struct workqueue_attrs *b)
3392{
3393        if (a->nice != b->nice)
3394                return false;
3395        if (!cpumask_equal(a->cpumask, b->cpumask))
3396                return false;
3397        return true;
3398}
3399
3400/**
3401 * init_worker_pool - initialize a newly zalloc'd worker_pool
3402 * @pool: worker_pool to initialize
3403 *
3404 * Initialize a newly zalloc'd @pool.  It also allocates @pool->attrs.
3405 *
3406 * Return: 0 on success, -errno on failure.  Even on failure, all fields
3407 * inside @pool proper are initialized and put_unbound_pool() can be called
3408 * on @pool safely to release it.
3409 */
3410static int init_worker_pool(struct worker_pool *pool)
3411{
3412        spin_lock_init(&pool->lock);
3413        pool->id = -1;
3414        pool->cpu = -1;
3415        pool->node = NUMA_NO_NODE;
3416        pool->flags |= POOL_DISASSOCIATED;
3417        pool->watchdog_ts = jiffies;
3418        INIT_LIST_HEAD(&pool->worklist);
3419        INIT_LIST_HEAD(&pool->idle_list);
3420        hash_init(pool->busy_hash);
3421
3422        timer_setup(&pool->idle_timer, idle_worker_timeout, TIMER_DEFERRABLE);
3423
3424        timer_setup(&pool->mayday_timer, pool_mayday_timeout, 0);
3425
3426        INIT_LIST_HEAD(&pool->workers);
3427
3428        ida_init(&pool->worker_ida);
3429        INIT_HLIST_NODE(&pool->hash_node);
3430        pool->refcnt = 1;
3431
3432        /* shouldn't fail above this point */
3433        pool->attrs = alloc_workqueue_attrs();
3434        if (!pool->attrs)
3435                return -ENOMEM;
3436        return 0;
3437}
3438
3439#ifdef CONFIG_LOCKDEP
3440static void wq_init_lockdep(struct workqueue_struct *wq)
3441{
3442        char *lock_name;
3443
3444        lockdep_register_key(&wq->key);
3445        lock_name = kasprintf(GFP_KERNEL, "%s%s", "(wq_completion)", wq->name);
3446        if (!lock_name)
3447                lock_name = wq->name;
3448
3449        wq->lock_name = lock_name;
3450        lockdep_init_map(&wq->lockdep_map, lock_name, &wq->key, 0);
3451}
3452
3453static void wq_unregister_lockdep(struct workqueue_struct *wq)
3454{
3455        lockdep_unregister_key(&wq->key);
3456}
3457
3458static void wq_free_lockdep(struct workqueue_struct *wq)
3459{
3460        if (wq->lock_name != wq->name)
3461                kfree(wq->lock_name);
3462}
3463#else
3464static void wq_init_lockdep(struct workqueue_struct *wq)
3465{
3466}
3467
3468static void wq_unregister_lockdep(struct workqueue_struct *wq)
3469{
3470}
3471
3472static void wq_free_lockdep(struct workqueue_struct *wq)
3473{
3474}
3475#endif
3476
3477static void rcu_free_wq(struct rcu_head *rcu)
3478{
3479        struct workqueue_struct *wq =
3480                container_of(rcu, struct workqueue_struct, rcu);
3481
3482        wq_free_lockdep(wq);
3483
3484        if (!(wq->flags & WQ_UNBOUND))
3485                free_percpu(wq->cpu_pwqs);
3486        else
3487                free_workqueue_attrs(wq->unbound_attrs);
3488
3489        kfree(wq->rescuer);
3490        kfree(wq);
3491}
3492
3493static void rcu_free_pool(struct rcu_head *rcu)
3494{
3495        struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
3496
3497        ida_destroy(&pool->worker_ida);
3498        free_workqueue_attrs(pool->attrs);
3499        kfree(pool);
3500}
3501
3502/**
3503 * put_unbound_pool - put a worker_pool
3504 * @pool: worker_pool to put
3505 *
3506 * Put @pool.  If its refcnt reaches zero, it gets destroyed in RCU
3507 * safe manner.  get_unbound_pool() calls this function on its failure path
3508 * and this function should be able to release pools which went through,
3509 * successfully or not, init_worker_pool().
3510 *
3511 * Should be called with wq_pool_mutex held.
3512 */
3513static void put_unbound_pool(struct worker_pool *pool)
3514{
3515        DECLARE_COMPLETION_ONSTACK(detach_completion);
3516        struct worker *worker;
3517
3518        lockdep_assert_held(&wq_pool_mutex);
3519
3520        if (--pool->refcnt)
3521                return;
3522
3523        /* sanity checks */
3524        if (WARN_ON(!(pool->cpu < 0)) ||
3525            WARN_ON(!list_empty(&pool->worklist)))
3526                return;
3527
3528        /* release id and unhash */
3529        if (pool->id >= 0)
3530                idr_remove(&worker_pool_idr, pool->id);
3531        hash_del(&pool->hash_node);
3532
3533        /*
3534         * Become the manager and destroy all workers.  This prevents
3535         * @pool's workers from blocking on wq_pool_attach_mutex.  We're the last
3536         * manager and @pool gets freed with the flag set.
3537         */
3538        spin_lock_irq(&pool->lock);
3539        wait_event_lock_irq(wq_manager_wait,
3540                            !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock);
3541        pool->flags |= POOL_MANAGER_ACTIVE;
3542
3543        while ((worker = first_idle_worker(pool)))
3544                destroy_worker(worker);
3545        WARN_ON(pool->nr_workers || pool->nr_idle);
3546        spin_unlock_irq(&pool->lock);
3547
3548        mutex_lock(&wq_pool_attach_mutex);
3549        if (!list_empty(&pool->workers))
3550                pool->detach_completion = &detach_completion;
3551        mutex_unlock(&wq_pool_attach_mutex);
3552
3553        if (pool->detach_completion)
3554                wait_for_completion(pool->detach_completion);
3555
3556        /* shut down the timers */
3557        del_timer_sync(&pool->idle_timer);
3558        del_timer_sync(&pool->mayday_timer);
3559
3560        /* RCU protected to allow dereferences from get_work_pool() */
3561        call_rcu(&pool->rcu, rcu_free_pool);
3562}
3563
3564/**
3565 * get_unbound_pool - get a worker_pool with the specified attributes
3566 * @attrs: the attributes of the worker_pool to get
3567 *
3568 * Obtain a worker_pool which has the same attributes as @attrs, bump the
3569 * reference count and return it.  If there already is a matching
3570 * worker_pool, it will be used; otherwise, this function attempts to
3571 * create a new one.
3572 *
3573 * Should be called with wq_pool_mutex held.
3574 *
3575 * Return: On success, a worker_pool with the same attributes as @attrs.
3576 * On failure, %NULL.
3577 */
3578static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
3579{
3580        u32 hash = wqattrs_hash(attrs);
3581        struct worker_pool *pool;
3582        int node;
3583        int target_node = NUMA_NO_NODE;
3584
3585        lockdep_assert_held(&wq_pool_mutex);
3586
3587        /* do we already have a matching pool? */
3588        hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
3589                if (wqattrs_equal(pool->attrs, attrs)) {
3590                        pool->refcnt++;
3591                        return pool;
3592                }
3593        }
3594
3595        /* if cpumask is contained inside a NUMA node, we belong to that node */
3596        if (wq_numa_enabled) {
3597                for_each_node(node) {
3598                        if (cpumask_subset(attrs->cpumask,
3599                                           wq_numa_possible_cpumask[node])) {
3600                                target_node = node;
3601                                break;
3602                        }
3603                }
3604        }
3605
3606        /* nope, create a new one */
3607        pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, target_node);
3608        if (!pool || init_worker_pool(pool) < 0)
3609                goto fail;
3610
3611        lockdep_set_subclass(&pool->lock, 1);   /* see put_pwq() */
3612        copy_workqueue_attrs(pool->attrs, attrs);
3613        pool->node = target_node;
3614
3615        /*
3616         * no_numa isn't a worker_pool attribute, always clear it.  See
3617         * 'struct workqueue_attrs' comments for detail.
3618         */
3619        pool->attrs->no_numa = false;
3620
3621        if (worker_pool_assign_id(pool) < 0)
3622                goto fail;
3623
3624        /* create and start the initial worker */
3625        if (wq_online && !create_worker(pool))
3626                goto fail;
3627
3628        /* install */
3629        hash_add(unbound_pool_hash, &pool->hash_node, hash);
3630
3631        return pool;
3632fail:
3633        if (pool)
3634                put_unbound_pool(pool);
3635        return NULL;
3636}
3637
3638static void rcu_free_pwq(struct rcu_head *rcu)
3639{
3640        kmem_cache_free(pwq_cache,
3641                        container_of(rcu, struct pool_workqueue, rcu));
3642}
3643
3644/*
3645 * Scheduled on system_wq by put_pwq() when an unbound pwq hits zero refcnt
3646 * and needs to be destroyed.
3647 */
3648static void pwq_unbound_release_workfn(struct work_struct *work)
3649{
3650        struct pool_workqueue *pwq = container_of(work, struct pool_workqueue,
3651                                                  unbound_release_work);
3652        struct workqueue_struct *wq = pwq->wq;
3653        struct worker_pool *pool = pwq->pool;
3654        bool is_last;
3655
3656        if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
3657                return;
3658
3659        mutex_lock(&wq->mutex);
3660        list_del_rcu(&pwq->pwqs_node);
3661        is_last = list_empty(&wq->pwqs);
3662        mutex_unlock(&wq->mutex);
3663
3664        mutex_lock(&wq_pool_mutex);
3665        put_unbound_pool(pool);
3666        mutex_unlock(&wq_pool_mutex);
3667
3668        call_rcu(&pwq->rcu, rcu_free_pwq);
3669
3670        /*
3671         * If we're the last pwq going away, @wq is already dead and no one
3672         * is gonna access it anymore.  Schedule RCU free.
3673         */
3674        if (is_last) {
3675                wq_unregister_lockdep(wq);
3676                call_rcu(&wq->rcu, rcu_free_wq);
3677        }
3678}
3679
3680/**
3681 * pwq_adjust_max_active - update a pwq's max_active to the current setting
3682 * @pwq: target pool_workqueue
3683 *
3684 * If @pwq isn't freezing, set @pwq->max_active to the associated
3685 * workqueue's saved_max_active and activate delayed work items
3686 * accordingly.  If @pwq is freezing, clear @pwq->max_active to zero.
3687 */
3688static void pwq_adjust_max_active(struct pool_workqueue *pwq)
3689{
3690        struct workqueue_struct *wq = pwq->wq;
3691        bool freezable = wq->flags & WQ_FREEZABLE;
3692        unsigned long flags;
3693
3694        /* for @wq->saved_max_active */
3695        lockdep_assert_held(&wq->mutex);
3696
3697        /* fast exit for non-freezable wqs */
3698        if (!freezable && pwq->max_active == wq->saved_max_active)
3699                return;
3700
3701        /* this function can be called during early boot w/ irq disabled */
3702        spin_lock_irqsave(&pwq->pool->lock, flags);
3703
3704        /*
3705         * During [un]freezing, the caller is responsible for ensuring that
3706         * this function is called at least once after @workqueue_freezing
3707         * is updated and visible.
3708         */
3709        if (!freezable || !workqueue_freezing) {
3710                pwq->max_active = wq->saved_max_active;
3711
3712                while (!list_empty(&pwq->delayed_works) &&
3713                       pwq->nr_active < pwq->max_active)
3714                        pwq_activate_first_delayed(pwq);
3715
3716                /*
3717                 * Need to kick a worker after thawing or when an unbound wq's
3718                 * max_active is bumped.  It's a slow path.  Do it always.
3719                 */
3720                wake_up_worker(pwq->pool);
3721        } else {
3722                pwq->max_active = 0;
3723        }
3724
3725        spin_unlock_irqrestore(&pwq->pool->lock, flags);
3726}
3727
3728/* initialize newly alloced @pwq which is associated with @wq and @pool */
3729static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
3730                     struct worker_pool *pool)
3731{
3732        BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
3733
3734        memset(pwq, 0, sizeof(*pwq));
3735
3736        pwq->pool = pool;
3737        pwq->wq = wq;
3738        pwq->flush_color = -1;
3739        pwq->refcnt = 1;
3740        INIT_LIST_HEAD(&pwq->delayed_works);
3741        INIT_LIST_HEAD(&pwq->pwqs_node);
3742        INIT_LIST_HEAD(&pwq->mayday_node);
3743        INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
3744}
3745
3746/* sync @pwq with the current state of its associated wq and link it */
3747static void link_pwq(struct pool_workqueue *pwq)
3748{
3749        struct workqueue_struct *wq = pwq->wq;
3750
3751        lockdep_assert_held(&wq->mutex);
3752
3753        /* may be called multiple times, ignore if already linked */
3754        if (!list_empty(&pwq->pwqs_node))
3755                return;
3756
3757        /* set the matching work_color */
3758        pwq->work_color = wq->work_color;
3759
3760        /* sync max_active to the current setting */
3761        pwq_adjust_max_active(pwq);
3762
3763        /* link in @pwq */
3764        list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
3765}
3766
3767/* obtain a pool matching @attrs and create a pwq associating the pool and @wq */
3768static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
3769                                        const struct workqueue_attrs *attrs)
3770{
3771        struct worker_pool *pool;
3772        struct pool_workqueue *pwq;
3773
3774        lockdep_assert_held(&wq_pool_mutex);
3775
3776        pool = get_unbound_pool(attrs);
3777        if (!pool)
3778                return NULL;
3779
3780        pwq = kmem_cache_alloc_node(pwq_cache, GFP_KERNEL, pool->node);
3781        if (!pwq) {
3782                put_unbound_pool(pool);
3783                return NULL;
3784        }
3785
3786        init_pwq(pwq, wq, pool);
3787        return pwq;
3788}
3789
3790/**
3791 * wq_calc_node_cpumask - calculate a wq_attrs' cpumask for the specified node
3792 * @attrs: the wq_attrs of the default pwq of the target workqueue
3793 * @node: the target NUMA node
3794 * @cpu_going_down: if >= 0, the CPU to consider as offline
3795 * @cpumask: outarg, the resulting cpumask
3796 *
3797 * Calculate the cpumask a workqueue with @attrs should use on @node.  If
3798 * @cpu_going_down is >= 0, that cpu is considered offline during
3799 * calculation.  The result is stored in @cpumask.
3800 *
3801 * If NUMA affinity is not enabled, @attrs->cpumask is always used.  If
3802 * enabled and @node has online CPUs requested by @attrs, the returned
3803 * cpumask is the intersection of the possible CPUs of @node and
3804 * @attrs->cpumask.
3805 *
3806 * The caller is responsible for ensuring that the cpumask of @node stays
3807 * stable.
3808 *
3809 * Return: %true if the resulting @cpumask is different from @attrs->cpumask,
3810 * %false if equal.
3811 */
3812static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
3813                                 int cpu_going_down, cpumask_t *cpumask)
3814{
3815        if (!wq_numa_enabled || attrs->no_numa)
3816                goto use_dfl;
3817
3818        /* does @node have any online CPUs @attrs wants? */
3819        cpumask_and(cpumask, cpumask_of_node(node), attrs->cpumask);
3820        if (cpu_going_down >= 0)
3821                cpumask_clear_cpu(cpu_going_down, cpumask);
3822
3823        if (cpumask_empty(cpumask))
3824                goto use_dfl;
3825
3826        /* yeap, return possible CPUs in @node that @attrs wants */
3827        cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]);
3828
3829        if (cpumask_empty(cpumask)) {
3830                pr_warn_once("WARNING: workqueue cpumask: online intersect > "
3831                                "possible intersect\n");
3832                return false;
3833        }
3834
3835        return !cpumask_equal(cpumask, attrs->cpumask);
3836
3837use_dfl:
3838        cpumask_copy(cpumask, attrs->cpumask);
3839        return false;
3840}
3841
3842/* install @pwq into @wq's numa_pwq_tbl[] for @node and return the old pwq */
3843static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
3844                                                   int node,
3845                                                   struct pool_workqueue *pwq)
3846{
3847        struct pool_workqueue *old_pwq;
3848
3849        lockdep_assert_held(&wq_pool_mutex);
3850        lockdep_assert_held(&wq->mutex);
3851
3852        /* link_pwq() can handle duplicate calls */
3853        link_pwq(pwq);
3854
3855        old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
3856        rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
3857        return old_pwq;
3858}
3859
3860/* context to store the prepared attrs & pwqs before applying */
3861struct apply_wqattrs_ctx {
3862        struct workqueue_struct *wq;            /* target workqueue */
3863        struct workqueue_attrs  *attrs;         /* attrs to apply */
3864        struct list_head        list;           /* queued for batching commit */
3865        struct pool_workqueue   *dfl_pwq;
3866        struct pool_workqueue   *pwq_tbl[];
3867};
3868
3869/* free the resources after success or abort */
3870static void apply_wqattrs_cleanup(struct apply_wqattrs_ctx *ctx)
3871{
3872        if (ctx) {
3873                int node;
3874
3875                for_each_node(node)
3876                        put_pwq_unlocked(ctx->pwq_tbl[node]);
3877                put_pwq_unlocked(ctx->dfl_pwq);
3878
3879                free_workqueue_attrs(ctx->attrs);
3880
3881                kfree(ctx);
3882        }
3883}
3884
3885/* allocate the attrs and pwqs for later installation */
3886static struct apply_wqattrs_ctx *
3887apply_wqattrs_prepare(struct workqueue_struct *wq,
3888                      const struct workqueue_attrs *attrs)
3889{
3890        struct apply_wqattrs_ctx *ctx;
3891        struct workqueue_attrs *new_attrs, *tmp_attrs;
3892        int node;
3893
3894        lockdep_assert_held(&wq_pool_mutex);
3895
3896        ctx = kzalloc(struct_size(ctx, pwq_tbl, nr_node_ids), GFP_KERNEL);
3897
3898        new_attrs = alloc_workqueue_attrs();
3899        tmp_attrs = alloc_workqueue_attrs();
3900        if (!ctx || !new_attrs || !tmp_attrs)
3901                goto out_free;
3902
3903        /*
3904         * Calculate the attrs of the default pwq.
3905         * If the user configured cpumask doesn't overlap with the
3906         * wq_unbound_cpumask, we fallback to the wq_unbound_cpumask.
3907         */
3908        copy_workqueue_attrs(new_attrs, attrs);
3909        cpumask_and(new_attrs->cpumask, new_attrs->cpumask, wq_unbound_cpumask);
3910        if (unlikely(cpumask_empty(new_attrs->cpumask)))
3911                cpumask_copy(new_attrs->cpumask, wq_unbound_cpumask);
3912
3913        /*
3914         * We may create multiple pwqs with differing cpumasks.  Make a
3915         * copy of @new_attrs which will be modified and used to obtain
3916         * pools.
3917         */
3918        copy_workqueue_attrs(tmp_attrs, new_attrs);
3919
3920        /*
3921         * If something goes wrong during CPU up/down, we'll fall back to
3922         * the default pwq covering whole @attrs->cpumask.  Always create
3923         * it even if we don't use it immediately.
3924         */
3925        ctx->dfl_pwq = alloc_unbound_pwq(wq, new_attrs);
3926        if (!ctx->dfl_pwq)
3927                goto out_free;
3928
3929        for_each_node(node) {
3930                if (wq_calc_node_cpumask(new_attrs, node, -1, tmp_attrs->cpumask)) {
3931                        ctx->pwq_tbl[node] = alloc_unbound_pwq(wq, tmp_attrs);
3932                        if (!ctx->pwq_tbl[node])
3933                                goto out_free;
3934                } else {
3935                        ctx->dfl_pwq->refcnt++;
3936                        ctx->pwq_tbl[node] = ctx->dfl_pwq;
3937                }
3938        }
3939
3940        /* save the user configured attrs and sanitize them. */
3941        copy_workqueue_attrs(new_attrs, attrs);
3942        cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
3943        ctx->attrs = new_attrs;
3944
3945        ctx->wq = wq;
3946        free_workqueue_attrs(tmp_attrs);
3947        return ctx;
3948
3949out_free:
3950        free_workqueue_attrs(tmp_attrs);
3951        free_workqueue_attrs(new_attrs);
3952        apply_wqattrs_cleanup(ctx);
3953        return NULL;
3954}
3955
3956/* set attrs and install prepared pwqs, @ctx points to old pwqs on return */
3957static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx)
3958{
3959        int node;
3960
3961        /* all pwqs have been created successfully, let's install'em */
3962        mutex_lock(&ctx->wq->mutex);
3963
3964        copy_workqueue_attrs(ctx->wq->unbound_attrs, ctx->attrs);
3965
3966        /* save the previous pwq and install the new one */
3967        for_each_node(node)
3968                ctx->pwq_tbl[node] = numa_pwq_tbl_install(ctx->wq, node,
3969                                                          ctx->pwq_tbl[node]);
3970
3971        /* @dfl_pwq might not have been used, ensure it's linked */
3972        link_pwq(ctx->dfl_pwq);
3973        swap(ctx->wq->dfl_pwq, ctx->dfl_pwq);
3974
3975        mutex_unlock(&ctx->wq->mutex);
3976}
3977
3978static void apply_wqattrs_lock(void)
3979{
3980        /* CPUs should stay stable across pwq creations and installations */
3981        get_online_cpus();
3982        mutex_lock(&wq_pool_mutex);
3983}
3984
3985static void apply_wqattrs_unlock(void)
3986{
3987        mutex_unlock(&wq_pool_mutex);
3988        put_online_cpus();
3989}
3990
3991static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
3992                                        const struct workqueue_attrs *attrs)
3993{
3994        struct apply_wqattrs_ctx *ctx;
3995
3996        /* only unbound workqueues can change attributes */
3997        if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
3998                return -EINVAL;
3999
4000        /* creating multiple pwqs breaks ordering guarantee */
4001        if (!list_empty(&wq->pwqs)) {
4002                if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
4003                        return -EINVAL;
4004
4005                wq->flags &= ~__WQ_ORDERED;
4006        }
4007
4008        ctx = apply_wqattrs_prepare(wq, attrs);
4009        if (!ctx)
4010                return -ENOMEM;
4011
4012        /* the ctx has been prepared successfully, let's commit it */
4013        apply_wqattrs_commit(ctx);
4014        apply_wqattrs_cleanup(ctx);
4015
4016        return 0;
4017}
4018
4019/**
4020 * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
4021 * @wq: the target workqueue
4022 * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
4023 *
4024 * Apply @attrs to an unbound workqueue @wq.  Unless disabled, on NUMA
4025 * machines, this function maps a separate pwq to each NUMA node with
4026 * possible CPUs in @attrs->cpumask so that work items are affine to the
4027 * NUMA node it was issued on.  Older pwqs are released as in-flight work
4028 * items finish.  Note that a work item which repeatedly requeues itself
4029 * back-to-back will stay on its current pwq.
4030 *
4031 * Performs GFP_KERNEL allocations.
4032 *
4033 * Assumes caller has CPU hotplug read exclusion, i.e. get_online_cpus().
4034 *
4035 * Return: 0 on success and -errno on failure.
4036 */
4037int apply_workqueue_attrs(struct workqueue_struct *wq,
4038                          const struct workqueue_attrs *attrs)
4039{
4040        int ret;
4041
4042        lockdep_assert_cpus_held();
4043
4044        mutex_lock(&wq_pool_mutex);
4045        ret = apply_workqueue_attrs_locked(wq, attrs);
4046        mutex_unlock(&wq_pool_mutex);
4047
4048        return ret;
4049}
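
/*
 * Sketch of the expected calling pattern (the nice value and cpumask are
 * arbitrary; unbound_wq and housekeeping_mask are assumed caller-side
 * names):
 *
 *	struct workqueue_attrs *attrs = alloc_workqueue_attrs();
 *
 *	if (attrs) {
 *		attrs->nice = -5;
 *		cpumask_copy(attrs->cpumask, housekeeping_mask);
 *		get_online_cpus();		// hotplug read exclusion
 *		ret = apply_workqueue_attrs(unbound_wq, attrs);
 *		put_online_cpus();
 *		free_workqueue_attrs(attrs);
 *	}
 */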
4050
4051/**
4052 * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug
4053 * @wq: the target workqueue
4054 * @cpu: the CPU coming up or going down
4055 * @online: whether @cpu is coming up or going down
4056 *
4057 * This function is to be called from %CPU_DOWN_PREPARE, %CPU_ONLINE and
4058 * %CPU_DOWN_FAILED.  @cpu is being hot[un]plugged, update NUMA affinity of
4059 * @wq accordingly.
4060 *
4061 * If NUMA affinity can't be adjusted due to memory allocation failure, it
4062 * falls back to @wq->dfl_pwq which may not be optimal but is always
4063 * correct.
4064 *
4065 * Note that when the last allowed CPU of a NUMA node goes offline for a
4066 * workqueue with a cpumask spanning multiple nodes, the workers which were
4067 * already executing the work items for the workqueue will lose their CPU
4068 * affinity and may execute on any CPU.  This is similar to how per-cpu
4069 * workqueues behave on CPU_DOWN.  If a workqueue user wants strict
4070 * affinity, it's the user's responsibility to flush the work item from
4071 * CPU_DOWN_PREPARE.
4072 */
4073static void wq_update_unbound_numa(struct workqueue_struct *wq, int cpu,
4074                                   bool online)
4075{
4076        int node = cpu_to_node(cpu);
4077        int cpu_off = online ? -1 : cpu;
4078        struct pool_workqueue *old_pwq = NULL, *pwq;
4079        struct workqueue_attrs *target_attrs;
4080        cpumask_t *cpumask;
4081
4082        lockdep_assert_held(&wq_pool_mutex);
4083
4084        if (!wq_numa_enabled || !(wq->flags & WQ_UNBOUND) ||
4085            wq->unbound_attrs->no_numa)
4086                return;
4087
4088        /*
4089         * We don't wanna alloc/free wq_attrs for each wq for each CPU.
4090         * Let's use a preallocated one.  The following buf is protected by
4091         * CPU hotplug exclusion.
4092         */
4093        target_attrs = wq_update_unbound_numa_attrs_buf;
4094        cpumask = target_attrs->cpumask;
4095
4096        copy_workqueue_attrs(target_attrs, wq->unbound_attrs);
4097        pwq = unbound_pwq_by_node(wq, node);
4098
4099        /*
4100         * Let's determine what needs to be done.  If the target cpumask is
4101         * different from the default pwq's, we need to compare it to @pwq's
4102         * and create a new one if they don't match.  If the target cpumask
4103         * equals the default pwq's, the default pwq should be used.
4104         */
4105        if (wq_calc_node_cpumask(wq->dfl_pwq->pool->attrs, node, cpu_off, cpumask)) {
4106                if (cpumask_equal(cpumask, pwq->pool->attrs->cpumask))
4107                        return;
4108        } else {
4109                goto use_dfl_pwq;
4110        }
4111
4112        /* create a new pwq */
4113        pwq = alloc_unbound_pwq(wq, target_attrs);
4114        if (!pwq) {
4115                pr_warn("workqueue: allocation failed while updating NUMA affinity of \"%s\"\n",
4116                        wq->name);
4117                goto use_dfl_pwq;
4118        }
4119
4120        /* Install the new pwq. */
4121        mutex_lock(&wq->mutex);
4122        old_pwq = numa_pwq_tbl_install(wq, node, pwq);
4123        goto out_unlock;
4124
4125use_dfl_pwq:
4126        mutex_lock(&wq->mutex);
4127        spin_lock_irq(&wq->dfl_pwq->pool->lock);
4128        get_pwq(wq->dfl_pwq);
4129        spin_unlock_irq(&wq->dfl_pwq->pool->lock);
4130        old_pwq = numa_pwq_tbl_install(wq, node, wq->dfl_pwq);
4131out_unlock:
4132        mutex_unlock(&wq->mutex);
4133        put_pwq_unlocked(old_pwq);
4134}
4135
4136static int alloc_and_link_pwqs(struct workqueue_struct *wq)
4137{
4138        bool highpri = wq->flags & WQ_HIGHPRI;
4139        int cpu, ret;
4140
4141        if (!(wq->flags & WQ_UNBOUND)) {
4142                wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
4143                if (!wq->cpu_pwqs)
4144                        return -ENOMEM;
4145
4146                for_each_possible_cpu(cpu) {
4147                        struct pool_workqueue *pwq =
4148                                per_cpu_ptr(wq->cpu_pwqs, cpu);
4149                        struct worker_pool *cpu_pools =
4150                                per_cpu(cpu_worker_pools, cpu);
4151
4152                        init_pwq(pwq, wq, &cpu_pools[highpri]);
4153
4154                        mutex_lock(&wq->mutex);
4155                        link_pwq(pwq);
4156                        mutex_unlock(&wq->mutex);
4157                }
4158                return 0;
4159        }
4160
4161        get_online_cpus();
4162        if (wq->flags & __WQ_ORDERED) {
4163                ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
4164                /* there should be only a single pwq for ordering guarantee */
4165                WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
4166                              wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
4167                     "ordering guarantee broken for workqueue %s\n", wq->name);
4168        } else {
4169                ret = apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
4170        }
4171        put_online_cpus();
4172
4173        return ret;
4174}
4175
4176static int wq_clamp_max_active(int max_active, unsigned int flags,
4177                               const char *name)
4178{
4179        int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
4180
4181        if (max_active < 1 || max_active > lim)
4182                pr_warn("workqueue: max_active %d requested for %s is out of range, clamping between %d and %d\n",
4183                        max_active, name, 1, lim);
4184
4185        return clamp_val(max_active, 1, lim);
4186}
4187
4188/*
4189 * Workqueues which may be used during memory reclaim should have a rescuer
4190 * to guarantee forward progress.
4191 */
4192static int init_rescuer(struct workqueue_struct *wq)
4193{
4194        struct worker *rescuer;
4195        int ret;
4196
4197        if (!(wq->flags & WQ_MEM_RECLAIM))
4198                return 0;
4199
4200        rescuer = alloc_worker(NUMA_NO_NODE);
4201        if (!rescuer)
4202                return -ENOMEM;
4203
4204        rescuer->rescue_wq = wq;
4205        rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", wq->name);
4206        ret = PTR_ERR_OR_ZERO(rescuer->task);
4207        if (ret) {
4208                kfree(rescuer);
4209                return ret;
4210        }
4211
4212        wq->rescuer = rescuer;
4213        kthread_bind_mask(rescuer->task, cpu_possible_mask);
4214        wake_up_process(rescuer->task);
4215
4216        return 0;
4217}
4218
4219__printf(1, 4)
4220struct workqueue_struct *alloc_workqueue(const char *fmt,
4221                                         unsigned int flags,
4222                                         int max_active, ...)
4223{
4224        size_t tbl_size = 0;
4225        va_list args;
4226        struct workqueue_struct *wq;
4227        struct pool_workqueue *pwq;
4228
4229        /*
4230         * Unbound && max_active == 1 used to imply ordered, which is no
4231         * longer the case on NUMA machines due to per-node pools.  While
4232         * alloc_ordered_workqueue() is the right way to create an ordered
4233         * workqueue, keep the previous behavior to avoid subtle breakages
4234         * on NUMA.
4235         */
4236        if ((flags & WQ_UNBOUND) && max_active == 1)
4237                flags |= __WQ_ORDERED;
4238
4239        /* see the comment above the definition of WQ_POWER_EFFICIENT */
4240        if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
4241                flags |= WQ_UNBOUND;
4242
4243        /* allocate wq and format name */
4244        if (flags & WQ_UNBOUND)
4245                tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]);
4246
4247        wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL);
4248        if (!wq)
4249                return NULL;
4250
4251        if (flags & WQ_UNBOUND) {
4252                wq->unbound_attrs = alloc_workqueue_attrs();
4253                if (!wq->unbound_attrs)
4254                        goto err_free_wq;
4255        }
4256
4257        va_start(args, max_active);
4258        vsnprintf(wq->name, sizeof(wq->name), fmt, args);
4259        va_end(args);
4260
4261        max_active = max_active ?: WQ_DFL_ACTIVE;
4262        max_active = wq_clamp_max_active(max_active, flags, wq->name);
4263
4264        /* init wq */
4265        wq->flags = flags;
4266        wq->saved_max_active = max_active;
4267        mutex_init(&wq->mutex);
4268        atomic_set(&wq->nr_pwqs_to_flush, 0);
4269        INIT_LIST_HEAD(&wq->pwqs);
4270        INIT_LIST_HEAD(&wq->flusher_queue);
4271        INIT_LIST_HEAD(&wq->flusher_overflow);
4272        INIT_LIST_HEAD(&wq->maydays);
4273
4274        wq_init_lockdep(wq);
4275        INIT_LIST_HEAD(&wq->list);
4276
4277        if (alloc_and_link_pwqs(wq) < 0)
4278                goto err_unreg_lockdep;
4279
4280        if (wq_online && init_rescuer(wq) < 0)
4281                goto err_destroy;
4282
4283        if ((wq->flags & WQ_SYSFS) && workqueue_sysfs_register(wq))
4284                goto err_destroy;
4285
4286        /*
4287         * wq_pool_mutex protects global freeze state and workqueues list.
4288         * Grab it, adjust max_active and add the new @wq to workqueues
4289         * list.
4290         */
4291        mutex_lock(&wq_pool_mutex);
4292
4293        mutex_lock(&wq->mutex);
4294        for_each_pwq(pwq, wq)
4295                pwq_adjust_max_active(pwq);
4296        mutex_unlock(&wq->mutex);
4297
4298        list_add_tail_rcu(&wq->list, &workqueues);
4299
4300        mutex_unlock(&wq_pool_mutex);
4301
4302        return wq;
4303
4304err_unreg_lockdep:
4305        wq_unregister_lockdep(wq);
4306        wq_free_lockdep(wq);
4307err_free_wq:
4308        free_workqueue_attrs(wq->unbound_attrs);
4309        kfree(wq);
4310        return NULL;
4311err_destroy:
4312        destroy_workqueue(wq);
4313        return NULL;
4314}
4315EXPORT_SYMBOL_GPL(alloc_workqueue);
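
/*
 * Sketch of a typical driver-side pairing; "my_wq" and the flag choice
 * are illustrative, not a requirement.  Passing 0 for @max_active picks
 * WQ_DFL_ACTIVE, and WQ_MEM_RECLAIM makes init_rescuer() above attach a
 * rescuer thread:
 *
 *	wq = alloc_workqueue("my_wq", WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
 *	if (!wq)
 *		return -ENOMEM;
 *	...
 *	destroy_workqueue(wq);		// drains remaining work first
 */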
4316
4317/**
4318 * destroy_workqueue - safely terminate a workqueue
4319 * @wq: target workqueue
4320 *
4321 * Safely destroy a workqueue. All work currently pending will be done first.
4322 */
4323void destroy_workqueue(struct workqueue_struct *wq)
4324{
4325        struct pool_workqueue *pwq;
4326        int node;
4327
4328        /* drain it before proceeding with destruction */
4329        drain_workqueue(wq);
4330
4331        /* sanity checks */
4332        mutex_lock(&wq->mutex);
4333        for_each_pwq(pwq, wq) {
4334                int i;
4335
4336                for (i = 0; i < WORK_NR_COLORS; i++) {
4337                        if (WARN_ON(pwq->nr_in_flight[i])) {
4338                                mutex_unlock(&wq->mutex);
4339                                show_workqueue_state();
4340                                return;
4341                        }
4342                }
4343
4344                if (WARN_ON((pwq != wq->dfl_pwq) && (pwq->refcnt > 1)) ||
4345                    WARN_ON(pwq->nr_active) ||
4346                    WARN_ON(!list_empty(&pwq->delayed_works))) {
4347                        mutex_unlock(&wq->mutex);
4348                        show_workqueue_state();
4349                        return;
4350                }
4351        }
4352        mutex_unlock(&wq->mutex);
4353
4354        /*
4355         * The wq list is used to freeze wqs; remove from the list after
4356         * flushing is complete in case a freeze races us.
4357         */
4358        mutex_lock(&wq_pool_mutex);
4359        list_del_rcu(&wq->list);
4360        mutex_unlock(&wq_pool_mutex);
4361
4362        workqueue_sysfs_unregister(wq);
4363
4364        if (wq->rescuer)
4365                kthread_stop(wq->rescuer->task);
4366
4367        if (!(wq->flags & WQ_UNBOUND)) {
4368                wq_unregister_lockdep(wq);
4369                /*
4370                 * The base ref is never dropped on per-cpu pwqs.  Directly
4371                 * schedule RCU free.
4372                 */
4373                call_rcu(&wq->rcu, rcu_free_wq);
4374        } else {
4375                /*
4376                 * We're the sole accessor of @wq at this point.  Directly
4377                 * access numa_pwq_tbl[] and dfl_pwq to put the base refs.
4378                 * @wq will be freed when the last pwq is released.
4379                 */
4380                for_each_node(node) {
4381                        pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
4382                        RCU_INIT_POINTER(wq->numa_pwq_tbl[node], NULL);
4383                        put_pwq_unlocked(pwq);
4384                }
4385
4386                /*
4387                 * Put dfl_pwq.  @wq may be freed any time after dfl_pwq is
4388                 * put.  Don't access it afterwards.
4389                 */
4390                pwq = wq->dfl_pwq;
4391                wq->dfl_pwq = NULL;
4392                put_pwq_unlocked(pwq);
4393        }
4394}
4395EXPORT_SYMBOL_GPL(destroy_workqueue);
4396
4397/**
4398 * workqueue_set_max_active - adjust max_active of a workqueue
4399 * @wq: target workqueue
4400 * @max_active: new max_active value.
4401 *
4402 * Set max_active of @wq to @max_active.
4403 *
4404 * CONTEXT:
4405 * Don't call from IRQ context.
4406 */
4407void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
4408{
4409        struct pool_workqueue *pwq;
4410
4411        /* disallow meddling with max_active for ordered workqueues */
4412        if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
4413                return;
4414
4415        max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
4416
4417        mutex_lock(&wq->mutex);
4418
4419        wq->flags &= ~__WQ_ORDERED;
4420        wq->saved_max_active = max_active;
4421
4422        for_each_pwq(pwq, wq)
4423                pwq_adjust_max_active(pwq);
4424
4425        mutex_unlock(&wq->mutex);
4426}
4427EXPORT_SYMBOL_GPL(workqueue_set_max_active);
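
/*
 * Sketch: throttling concurrency at runtime; my_wq is assumed to have
 * been created without alloc_ordered_workqueue(), otherwise the WARN
 * above fires and the call is ignored:
 *
 *	workqueue_set_max_active(my_wq, 4);	// at most 4 in-flight items per pwq
 */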
4428
4429/**
4430 * current_work - retrieve %current task's work struct
4431 *
4432 * Determine if %current task is a workqueue worker and what it's working on.
4433 * Useful to find out the context that the %current task is running in.
4434 *
4435 * Return: work struct if %current task is a workqueue worker, %NULL otherwise.
4436 */
4437struct work_struct *current_work(void)
4438{
4439        struct worker *worker = current_wq_worker();
4440
4441        return worker ? worker->current_work : NULL;
4442}
4443EXPORT_SYMBOL(current_work);
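
/*
 * Sketch: a helper that may be called both from inside and outside its
 * own work item can use current_work() to avoid flushing itself;
 * dev->reset_work is illustrative:
 *
 *	if (current_work() != &dev->reset_work)
 *		flush_work(&dev->reset_work);	// don't wait on ourselves
 */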
4444
4445/**
4446 * current_is_workqueue_rescuer - is %current workqueue rescuer?
4447 *
4448 * Determine whether %current is a workqueue rescuer.  Can be used from
4449 * work functions to determine whether it's being run off the rescuer task.
4450 *
4451 * Return: %true if %current is a workqueue rescuer. %false otherwise.
4452 */
4453bool current_is_workqueue_rescuer(void)
4454{
4455        struct worker *worker = current_wq_worker();
4456
4457        return worker && worker->rescue_wq;
4458}
4459
4460/**
4461 * workqueue_congested - test whether a workqueue is congested
4462 * @cpu: CPU in question
4463 * @wq: target workqueue
4464 *
4465 * Test whether @wq's cpu workqueue for @cpu is congested.  There is
4466 * no synchronization around this function and the test result is
4467 * unreliable and only useful as advisory hints or for debugging.
4468 *
4469 * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
4470 * Note that both per-cpu and unbound workqueues may be associated with
4471 * multiple pool_workqueues which have separate congested states.  A
4472 * workqueue being congested on one CPU doesn't mean the workqueue is also
4473 * congested on other CPUs / NUMA nodes.
4474 *
4475 * Return:
4476 * %true if congested, %false otherwise.
4477 */
4478bool workqueue_congested(int cpu, struct workqueue_struct *wq)
4479{
4480        struct pool_workqueue *pwq;
4481        bool ret;
4482
4483        rcu_read_lock();
4484        preempt_disable();
4485
4486        if (cpu == WORK_CPU_UNBOUND)
4487                cpu = smp_processor_id();
4488
4489        if (!(wq->flags & WQ_UNBOUND))
4490                pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
4491        else
4492                pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
4493
4494        ret = !list_empty(&pwq->delayed_works);
4495        preempt_enable();
4496        rcu_read_unlock();
4497
4498        return ret;
4499}
4500EXPORT_SYMBOL_GPL(workqueue_congested);
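
    /*
     * Example (illustrative): as the result is only advisory, a typical
     * use is opportunistic back-off rather than correctness decisions.
     * The "foo" names are hypothetical.
     *
     *	if (workqueue_congested(WORK_CPU_UNBOUND, foo_wq))
     *		foo_batch_locally(req);
     *	else
     *		queue_work(foo_wq, &req->work);
     */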
4501
4502/**
4503 * work_busy - test whether a work is currently pending or running
4504 * @work: the work to be tested
4505 *
4506 * Test whether @work is currently pending or running.  There is no
4507 * synchronization around this function and the test result is
4508 * unreliable and only useful as advisory hints or for debugging.
4509 *
4510 * Return:
4511 * OR'd bitmask of WORK_BUSY_* bits.
4512 */
4513unsigned int work_busy(struct work_struct *work)
4514{
4515        struct worker_pool *pool;
4516        unsigned long flags;
4517        unsigned int ret = 0;
4518
4519        if (work_pending(work))
4520                ret |= WORK_BUSY_PENDING;
4521
4522        rcu_read_lock();
4523        pool = get_work_pool(work);
4524        if (pool) {
4525                spin_lock_irqsave(&pool->lock, flags);
4526                if (find_worker_executing_work(pool, work))
4527                        ret |= WORK_BUSY_RUNNING;
4528                spin_unlock_irqrestore(&pool->lock, flags);
4529        }
4530        rcu_read_unlock();
4531
4532        return ret;
4533}
4534EXPORT_SYMBOL_GPL(work_busy);
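
    /*
     * Example (illustrative debugging aid; the result may already be stale
     * by the time it is inspected):
     *
     *	unsigned int busy = work_busy(&foo->work);
     *
     *	pr_debug("foo work%s%s\n",
     *		 busy & WORK_BUSY_PENDING ? " pending" : "",
     *		 busy & WORK_BUSY_RUNNING ? " running" : "");
     */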
4535
4536/**
4537 * set_worker_desc - set description for the current work item
4538 * @fmt: printf-style format string
4539 * @...: arguments for the format string
4540 *
4541 * This function can be called by a running work function to describe what
4542 * the work item is about.  If the worker task gets dumped, this
4543 * information will be printed out together with the dump to help debugging.
4544 * The description can be at most WORKER_DESC_LEN including the trailing '\0'.
4545 */
4546void set_worker_desc(const char *fmt, ...)
4547{
4548        struct worker *worker = current_wq_worker();
4549        va_list args;
4550
4551        if (worker) {
4552                va_start(args, fmt);
4553                vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
4554                va_end(args);
4555        }
4556}
4557EXPORT_SYMBOL_GPL(set_worker_desc);
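
    /*
     * Example (in the spirit of the writeback code; the names here are
     * illustrative): a work function can tag its worker with the device
     * it is operating on so that task dumps identify it.
     *
     *	set_worker_desc("flush-%s", dev_name(dev));
     */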
4558
4559/**
4560 * print_worker_info - print out worker information and description
4561 * @log_lvl: the log level to use when printing
4562 * @task: target task
4563 *
4564 * If @task is a worker and currently executing a work item, print out the
4565 * name of the workqueue being serviced and worker description set with
4566 * set_worker_desc() by the currently executing work item.
4567 *
4568 * This function can be safely called on any task as long as the
4569 * task_struct itself is accessible.  While safe, this function isn't
4570 * synchronized and may print out stale or garbled data of limited length.
4571 */
4572void print_worker_info(const char *log_lvl, struct task_struct *task)
4573{
4574        work_func_t *fn = NULL;
4575        char name[WQ_NAME_LEN] = { };
4576        char desc[WORKER_DESC_LEN] = { };
4577        struct pool_workqueue *pwq = NULL;
4578        struct workqueue_struct *wq = NULL;
4579        struct worker *worker;
4580
4581        if (!(task->flags & PF_WQ_WORKER))
4582                return;
4583
4584        /*
4585         * This function is called without any synchronization and @task
4586         * could be in any state.  Be careful with dereferences.
4587         */
4588        worker = kthread_probe_data(task);
4589
4590        /*
4591         * Carefully copy the associated workqueue's workfn, name and desc.
4592         * Keep the original last '\0' in case the original is garbage.
4593         */
4594        probe_kernel_read(&fn, &worker->current_func, sizeof(fn));
4595        probe_kernel_read(&pwq, &worker->current_pwq, sizeof(pwq));
4596        probe_kernel_read(&wq, &pwq->wq, sizeof(wq));
4597        probe_kernel_read(name, wq->name, sizeof(name) - 1);
4598        probe_kernel_read(desc, worker->desc, sizeof(desc) - 1);
4599
4600        if (fn || name[0] || desc[0]) {
4601                printk("%sWorkqueue: %s %ps", log_lvl, name, fn);
4602                if (strcmp(name, desc))
4603                        pr_cont(" (%s)", desc);
4604                pr_cont("\n");
4605        }
4606}
4607
4608static void pr_cont_pool_info(struct worker_pool *pool)
4609{
4610        pr_cont(" cpus=%*pbl", nr_cpumask_bits, pool->attrs->cpumask);
4611        if (pool->node != NUMA_NO_NODE)
4612                pr_cont(" node=%d", pool->node);
4613        pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice);
4614}
4615
4616static void pr_cont_work(bool comma, struct work_struct *work)
4617{
4618        if (work->func == wq_barrier_func) {
4619                struct wq_barrier *barr;
4620
4621                barr = container_of(work, struct wq_barrier, work);
4622
4623                pr_cont("%s BAR(%d)", comma ? "," : "",
4624                        task_pid_nr(barr->task));
4625        } else {
4626                pr_cont("%s %ps", comma ? "," : "", work->func);
4627        }
4628}
4629
4630static void show_pwq(struct pool_workqueue *pwq)
4631{
4632        struct worker_pool *pool = pwq->pool;
4633        struct work_struct *work;
4634        struct worker *worker;
4635        bool has_in_flight = false, has_pending = false;
4636        int bkt;
4637
4638        pr_info("  pwq %d:", pool->id);
4639        pr_cont_pool_info(pool);
4640
4641        pr_cont(" active=%d/%d%s\n", pwq->nr_active, pwq->max_active,
4642                !list_empty(&pwq->mayday_node) ? " MAYDAY" : "");
4643
4644        hash_for_each(pool->busy_hash, bkt, worker, hentry) {
4645                if (worker->current_pwq == pwq) {
4646                        has_in_flight = true;
4647                        break;
4648                }
4649        }
4650        if (has_in_flight) {
4651                bool comma = false;
4652
4653                pr_info("    in-flight:");
4654                hash_for_each(pool->busy_hash, bkt, worker, hentry) {
4655                        if (worker->current_pwq != pwq)
4656                                continue;
4657
4658                        pr_cont("%s %d%s:%ps", comma ? "," : "",
4659                                task_pid_nr(worker->task),
4660                                worker == pwq->wq->rescuer ? "(RESCUER)" : "",
4661                                worker->current_func);
4662                        list_for_each_entry(work, &worker->scheduled, entry)
4663                                pr_cont_work(false, work);
4664                        comma = true;
4665                }
4666                pr_cont("\n");
4667        }
4668
4669        list_for_each_entry(work, &pool->worklist, entry) {
4670                if (get_work_pwq(work) == pwq) {
4671                        has_pending = true;
4672                        break;
4673                }
4674        }
4675        if (has_pending) {
4676                bool comma = false;
4677
4678                pr_info("    pending:");
4679                list_for_each_entry(work, &pool->worklist, entry) {
4680                        if (get_work_pwq(work) != pwq)
4681                                continue;
4682
4683                        pr_cont_work(comma, work);
4684                        comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
4685                }
4686                pr_cont("\n");
4687        }
4688
4689        if (!list_empty(&pwq->delayed_works)) {
4690                bool comma = false;
4691
4692                pr_info("    delayed:");
4693                list_for_each_entry(work, &pwq->delayed_works, entry) {
4694                        pr_cont_work(comma, work);
4695                        comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
4696                }
4697                pr_cont("\n");
4698        }
4699}
4700
4701/**
4702 * show_workqueue_state - dump workqueue state
4703 *
4704 * Called from a sysrq handler or try_to_freeze_tasks() and prints out
4705 * all busy workqueues and pools.
4706 */
4707void show_workqueue_state(void)
4708{
4709        struct workqueue_struct *wq;
4710        struct worker_pool *pool;
4711        unsigned long flags;
4712        int pi;
4713
4714        rcu_read_lock();
4715
4716        pr_info("Showing busy workqueues and worker pools:\n");
4717
4718        list_for_each_entry_rcu(wq, &workqueues, list) {
4719                struct pool_workqueue *pwq;
4720                bool idle = true;
4721
4722                for_each_pwq(pwq, wq) {
4723                        if (pwq->nr_active || !list_empty(&pwq->delayed_works)) {
4724                                idle = false;
4725                                break;
4726                        }
4727                }
4728                if (idle)
4729                        continue;
4730
4731                pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
4732
4733                for_each_pwq(pwq, wq) {
4734                        spin_lock_irqsave(&pwq->pool->lock, flags);
4735                        if (pwq->nr_active || !list_empty(&pwq->delayed_works))
4736                                show_pwq(pwq);
4737                        spin_unlock_irqrestore(&pwq->pool->lock, flags);
4738                        /*
4739                         * We could be printing a lot from atomic context, e.g.
4740                         * sysrq-t -> show_workqueue_state(). Avoid triggering
4741                         * hard lockup.
4742                         */
4743                        touch_nmi_watchdog();
4744                }
4745        }
4746
4747        for_each_pool(pool, pi) {
4748                struct worker *worker;
4749                bool first = true;
4750
4751                spin_lock_irqsave(&pool->lock, flags);
4752                if (pool->nr_workers == pool->nr_idle)
4753                        goto next_pool;
4754
4755                pr_info("pool %d:", pool->id);
4756                pr_cont_pool_info(pool);
4757                pr_cont(" hung=%us workers=%d",
4758                        jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000,
4759                        pool->nr_workers);
4760                if (pool->manager)
4761                        pr_cont(" manager: %d",
4762                                task_pid_nr(pool->manager->task));
4763                list_for_each_entry(worker, &pool->idle_list, entry) {
4764                        pr_cont(" %s%d", first ? "idle: " : "",
4765                                task_pid_nr(worker->task));
4766                        first = false;
4767                }
4768                pr_cont("\n");
4769        next_pool:
4770                spin_unlock_irqrestore(&pool->lock, flags);
4771                /*
4772                 * We could be printing a lot from atomic context, e.g.
4773                 * sysrq-t -> show_workqueue_state(). Avoid triggering
4774                 * hard lockup.
4775                 */
4776                touch_nmi_watchdog();
4777        }
4778
4779        rcu_read_unlock();
4780}
4781
4782/* used to show worker information through /proc/PID/{comm,stat,status} */
4783void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
4784{
4785        int off;
4786
4787        /* always show the actual comm */
4788        off = strscpy(buf, task->comm, size);
4789        if (off < 0)
4790                return;
4791
4792        /* stabilize PF_WQ_WORKER and worker pool association */
4793        mutex_lock(&wq_pool_attach_mutex);
4794
4795        if (task->flags & PF_WQ_WORKER) {
4796                struct worker *worker = kthread_data(task);
4797                struct worker_pool *pool = worker->pool;
4798
4799                if (pool) {
4800                        spin_lock_irq(&pool->lock);
4801                        /*
4802                         * ->desc tracks information (wq name or
4803                         * set_worker_desc()) for the latest execution.  If
4804                         * current, prepend '+', otherwise '-'.
4805                         */
4806                        if (worker->desc[0] != '\0') {
4807                                if (worker->current_work)
4808                                        scnprintf(buf + off, size - off, "+%s",
4809                                                  worker->desc);
4810                                else
4811                                        scnprintf(buf + off, size - off, "-%s",
4812                                                  worker->desc);
4813                        }
4814                        spin_unlock_irq(&pool->lock);
4815                }
4816        }
4817
4818        mutex_unlock(&wq_pool_attach_mutex);
4819}
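
    /*
     * With the above, a kworker executing a work item whose desc is
     * "flush-8:0" would show up in ps output roughly as
     * "kworker/u16:3+flush-8:0" (or with "-flush-8:0" between executions).
     * The pool part of the name and the desc here are illustrative.
     */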
4820
4821#ifdef CONFIG_SMP
4822
4823/*
4824 * CPU hotplug.
4825 *
4826 * There are two challenges in supporting CPU hotplug.  Firstly, there
4827 * are a lot of assumptions on strong associations among work, pwq and
4828 * pool which make migrating pending and scheduled works very
4829 * difficult to implement without impacting hot paths.  Secondly,
4830 * worker pools serve a mix of short, long and very long running works,
4831 * making blocked draining impractical.
4832 *
4833 * This is solved by allowing the pools to be disassociated from the CPU,
4834 * running as unbound ones, and allowing them to be reattached later if
4835 * the CPU comes back online.
4836 */
4837
4838static void unbind_workers(int cpu)
4839{
4840        struct worker_pool *pool;
4841        struct worker *worker;
4842
4843        for_each_cpu_worker_pool(pool, cpu) {
4844                mutex_lock(&wq_pool_attach_mutex);
4845                spin_lock_irq(&pool->lock);
4846
4847                /*
4848                 * We've blocked all attach/detach operations. Make all workers
4849                 * unbound and set DISASSOCIATED.  Before this, all workers
4850                 * except for the ones which are still executing works from
4851                 * before the last CPU down must be on the cpu.  After
4852                 * this, they may become diasporas.
4853                 */
4854                for_each_pool_worker(worker, pool)
4855                        worker->flags |= WORKER_UNBOUND;
4856
4857                pool->flags |= POOL_DISASSOCIATED;
4858
4859                spin_unlock_irq(&pool->lock);
4860                mutex_unlock(&wq_pool_attach_mutex);
4861
4862                /*
4863                 * Call schedule() so that we cross rq->lock and thus can
4864                 * guarantee sched callbacks see the %WORKER_UNBOUND flag.
4865                 * This is necessary as scheduler callbacks may be invoked
4866                 * from other cpus.
4867                 */
4868                schedule();
4869
4870                /*
4871                 * Sched callbacks are disabled now.  Zap nr_running.
4872                 * After this, nr_running stays zero and need_more_worker()
4873                 * and keep_working() are always true as long as the
4874                 * worklist is not empty.  This pool now behaves as an
4875                 * unbound (in terms of concurrency management) pool which
4876 * is served by workers tied to the pool.
4877                 */
4878                atomic_set(&pool->nr_running, 0);
4879
4880                /*
4881                 * With concurrency management just turned off, a busy
4882                 * worker blocking could lead to lengthy stalls.  Kick off
4883                 * unbound chain execution of currently pending work items.
4884                 */
4885                spin_lock_irq(&pool->lock);
4886                wake_up_worker(pool);
4887                spin_unlock_irq(&pool->lock);
4888        }
4889}
4890
4891/**
4892 * rebind_workers - rebind all workers of a pool to the associated CPU
4893 * @pool: pool of interest
4894 *
4895 * @pool->cpu is coming online.  Rebind all workers to the CPU.
4896 */
4897static void rebind_workers(struct worker_pool *pool)
4898{
4899        struct worker *worker;
4900
4901        lockdep_assert_held(&wq_pool_attach_mutex);
4902
4903        /*
4904         * Restore CPU affinity of all workers.  As all idle workers should
4905         * be on the run-queue of the associated CPU before any local
4906         * wake-ups for concurrency management happen, restore CPU affinity
4907         * of all workers first and then clear UNBOUND.  As we're called
4908         * from CPU_ONLINE, the following shouldn't fail.
4909         */
4910        for_each_pool_worker(worker, pool)
4911                WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
4912                                                  pool->attrs->cpumask) < 0);
4913
4914        spin_lock_irq(&pool->lock);
4915
4916        pool->flags &= ~POOL_DISASSOCIATED;
4917
4918        for_each_pool_worker(worker, pool) {
4919                unsigned int worker_flags = worker->flags;
4920
4921                /*
4922                 * A bound idle worker should actually be on the runqueue
4923                 * of the associated CPU for local wake-ups targeting it to
4924                 * work.  Kick all idle workers so that they migrate to the
4925                 * associated CPU.  Doing this in the same loop as
4926                 * replacing UNBOUND with REBOUND is safe as no worker will
4927                 * be bound before @pool->lock is released.
4928                 */
4929                if (worker_flags & WORKER_IDLE)
4930                        wake_up_process(worker->task);
4931
4932                /*
4933                 * We want to clear UNBOUND but can't directly call
4934                 * worker_clr_flags() or adjust nr_running.  Atomically
4935                 * replace UNBOUND with another NOT_RUNNING flag REBOUND.
4936                 * @worker will clear REBOUND using worker_clr_flags() when
4937                 * it initiates the next execution cycle thus restoring
4938                 * concurrency management.  Note that when or whether
4939                 * @worker clears REBOUND doesn't affect correctness.
4940                 *
4941                 * WRITE_ONCE() is necessary because @worker->flags may be
4942                 * tested without holding any lock in
4943                 * wq_worker_running().  Without it, NOT_RUNNING test may
4944                 * fail incorrectly leading to premature concurrency
4945                 * management operations.
4946                 */
4947                WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
4948                worker_flags |= WORKER_REBOUND;
4949                worker_flags &= ~WORKER_UNBOUND;
4950                WRITE_ONCE(worker->flags, worker_flags);
4951        }
4952
4953        spin_unlock_irq(&pool->lock);
4954}
4955
4956/**
4957 * restore_unbound_workers_cpumask - restore cpumask of unbound workers
4958 * @pool: unbound pool of interest
4959 * @cpu: the CPU which is coming up
4960 *
4961 * An unbound pool may end up with a cpumask which doesn't have any online
4962 * CPUs.  When a worker of such a pool gets scheduled, the scheduler resets
4963 * its cpus_allowed.  If @cpu is in @pool's cpumask which didn't have any
4964 * online CPU before, cpus_allowed of all its workers should be restored.
4965 */
4966static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
4967{
4968        static cpumask_t cpumask;
4969        struct worker *worker;
4970
4971        lockdep_assert_held(&wq_pool_attach_mutex);
4972
4973        /* is @cpu allowed for @pool? */
4974        if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
4975                return;
4976
4977        cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
4978
4979        /* as we're called from CPU_ONLINE, the following shouldn't fail */
4980        for_each_pool_worker(worker, pool)
4981                WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
4982}
4983
4984int workqueue_prepare_cpu(unsigned int cpu)
4985{
4986        struct worker_pool *pool;
4987
4988        for_each_cpu_worker_pool(pool, cpu) {
4989                if (pool->nr_workers)
4990                        continue;
4991                if (!create_worker(pool))
4992                        return -ENOMEM;
4993        }
4994        return 0;
4995}
4996
4997int workqueue_online_cpu(unsigned int cpu)
4998{
4999        struct worker_pool *pool;
5000        struct workqueue_struct *wq;
5001        int pi;
5002
5003        mutex_lock(&wq_pool_mutex);
5004
5005        for_each_pool(pool, pi) {
5006                mutex_lock(&wq_pool_attach_mutex);
5007
5008                if (pool->cpu == cpu)
5009                        rebind_workers(pool);
5010                else if (pool->cpu < 0)
5011                        restore_unbound_workers_cpumask(pool, cpu);
5012
5013                mutex_unlock(&wq_pool_attach_mutex);
5014        }
5015
5016        /* update NUMA affinity of unbound workqueues */
5017        list_for_each_entry(wq, &workqueues, list)
5018                wq_update_unbound_numa(wq, cpu, true);
5019
5020        mutex_unlock(&wq_pool_mutex);
5021        return 0;
5022}
5023
5024int workqueue_offline_cpu(unsigned int cpu)
5025{
5026        struct workqueue_struct *wq;
5027
5028        /* unbinding per-cpu workers should happen on the local CPU */
5029        if (WARN_ON(cpu != smp_processor_id()))
5030                return -1;
5031
5032        unbind_workers(cpu);
5033
5034        /* update NUMA affinity of unbound workqueues */
5035        mutex_lock(&wq_pool_mutex);
5036        list_for_each_entry(wq, &workqueues, list)
5037                wq_update_unbound_numa(wq, cpu, false);
5038        mutex_unlock(&wq_pool_mutex);
5039
5040        return 0;
5041}
5042
5043struct work_for_cpu {
5044        struct work_struct work;
5045        long (*fn)(void *);
5046        void *arg;
5047        long ret;
5048};
5049
5050static void work_for_cpu_fn(struct work_struct *work)
5051{
5052        struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work);
5053
5054        wfc->ret = wfc->fn(wfc->arg);
5055}
5056
5057/**
5058 * work_on_cpu - run a function in thread context on a particular cpu
5059 * @cpu: the cpu to run on
5060 * @fn: the function to run
5061 * @arg: the function arg
5062 *
5063 * It is up to the caller to ensure that the cpu doesn't go offline.
5064 * The caller must not hold any locks which would prevent @fn from completing.
5065 *
5066 * Return: The value @fn returns.
5067 */
5068long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
5069{
5070        struct work_for_cpu wfc = { .fn = fn, .arg = arg };
5071
5072        INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
5073        schedule_work_on(cpu, &wfc.work);
5074        flush_work(&wfc.work);
5075        destroy_work_on_stack(&wfc.work);
5076        return wfc.ret;
5077}
5078EXPORT_SYMBOL_GPL(work_on_cpu);
5079
5080/**
5081 * work_on_cpu_safe - run a function in thread context on a particular cpu
5082 * @cpu: the cpu to run on
5083 * @fn:  the function to run
5084 * @arg: the function argument
5085 *
5086 * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold
5087 * any locks which would prevent @fn from completing.
5088 *
5089 * Return: The value @fn returns.
5090 */
5091long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
5092{
5093        long ret = -ENODEV;
5094
5095        get_online_cpus();
5096        if (cpu_online(cpu))
5097                ret = work_on_cpu(cpu, fn, arg);
5098        put_online_cpus();
5099        return ret;
5100}
5101EXPORT_SYMBOL_GPL(work_on_cpu_safe);
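
    /*
     * Example (illustrative): run a function that must execute on a given
     * CPU, e.g. to access a CPU-local hardware resource, without the
     * caller managing hotplug exclusion itself.  "foo" is hypothetical.
     *
     *	static long foo_read(void *arg)
     *	{
     *		return foo_read_local_counter();
     *	}
     *
     *	ret = work_on_cpu_safe(cpu, foo_read, NULL);
     */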
5102#endif /* CONFIG_SMP */
5103
5104#ifdef CONFIG_FREEZER
5105
5106/**
5107 * freeze_workqueues_begin - begin freezing workqueues
5108 *
5109 * Start freezing workqueues.  After this function returns, all freezable
5110 * workqueues will queue new work items to their delayed_works list instead of
5111 * pool->worklist.
5112 *
5113 * CONTEXT:
5114 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
5115 */
5116void freeze_workqueues_begin(void)
5117{
5118        struct workqueue_struct *wq;
5119        struct pool_workqueue *pwq;
5120
5121        mutex_lock(&wq_pool_mutex);
5122
5123        WARN_ON_ONCE(workqueue_freezing);
5124        workqueue_freezing = true;
5125
5126        list_for_each_entry(wq, &workqueues, list) {
5127                mutex_lock(&wq->mutex);
5128                for_each_pwq(pwq, wq)
5129                        pwq_adjust_max_active(pwq);
5130                mutex_unlock(&wq->mutex);
5131        }
5132
5133        mutex_unlock(&wq_pool_mutex);
5134}
5135
5136/**
5137 * freeze_workqueues_busy - are freezable workqueues still busy?
5138 *
5139 * Check whether freezing is complete.  This function must be called
5140 * between freeze_workqueues_begin() and thaw_workqueues().
5141 *
5142 * CONTEXT:
5143 * Grabs and releases wq_pool_mutex.
5144 *
5145 * Return:
5146 * %true if some freezable workqueues are still busy.  %false if freezing
5147 * is complete.
5148 */
5149bool freeze_workqueues_busy(void)
5150{
5151        bool busy = false;
5152        struct workqueue_struct *wq;
5153        struct pool_workqueue *pwq;
5154
5155        mutex_lock(&wq_pool_mutex);
5156
5157        WARN_ON_ONCE(!workqueue_freezing);
5158
5159        list_for_each_entry(wq, &workqueues, list) {
5160                if (!(wq->flags & WQ_FREEZABLE))
5161                        continue;
5162                /*
5163                 * nr_active is monotonically decreasing.  It's safe
5164                 * to peek without lock.
5165                 */
5166                rcu_read_lock();
5167                for_each_pwq(pwq, wq) {
5168                        WARN_ON_ONCE(pwq->nr_active < 0);
5169                        if (pwq->nr_active) {
5170                                busy = true;
5171                                rcu_read_unlock();
5172                                goto out_unlock;
5173                        }
5174                }
5175                rcu_read_unlock();
5176        }
5177out_unlock:
5178        mutex_unlock(&wq_pool_mutex);
5179        return busy;
5180}
5181
5182/**
5183 * thaw_workqueues - thaw workqueues
5184 *
5185 * Thaw workqueues.  Normal queueing is restored and all collected
5186 * frozen works are transferred to their respective pool worklists.
5187 *
5188 * CONTEXT:
5189 * Grabs and releases wq_pool_mutex, wq->mutex and pool->lock's.
5190 */
5191void thaw_workqueues(void)
5192{
5193        struct workqueue_struct *wq;
5194        struct pool_workqueue *pwq;
5195
5196        mutex_lock(&wq_pool_mutex);
5197
5198        if (!workqueue_freezing)
5199                goto out_unlock;
5200
5201        workqueue_freezing = false;
5202
5203        /* restore max_active and repopulate worklist */
5204        list_for_each_entry(wq, &workqueues, list) {
5205                mutex_lock(&wq->mutex);
5206                for_each_pwq(pwq, wq)
5207                        pwq_adjust_max_active(pwq);
5208                mutex_unlock(&wq->mutex);
5209        }
5210
5211out_unlock:
5212        mutex_unlock(&wq_pool_mutex);
5213}
5214#endif /* CONFIG_FREEZER */
5215
5216static int workqueue_apply_unbound_cpumask(void)
5217{
5218        LIST_HEAD(ctxs);
5219        int ret = 0;
5220        struct workqueue_struct *wq;
5221        struct apply_wqattrs_ctx *ctx, *n;
5222
5223        lockdep_assert_held(&wq_pool_mutex);
5224
5225        list_for_each_entry(wq, &workqueues, list) {
5226                if (!(wq->flags & WQ_UNBOUND))
5227                        continue;
5228                /* creating multiple pwqs breaks ordering guarantee */
5229                if (wq->flags & __WQ_ORDERED)
5230                        continue;
5231
5232                ctx = apply_wqattrs_prepare(wq, wq->unbound_attrs);
5233                if (!ctx) {
5234                        ret = -ENOMEM;
5235                        break;
5236                }
5237
5238                list_add_tail(&ctx->list, &ctxs);
5239        }
5240
5241        list_for_each_entry_safe(ctx, n, &ctxs, list) {
5242                if (!ret)
5243                        apply_wqattrs_commit(ctx);
5244                apply_wqattrs_cleanup(ctx);
5245        }
5246
5247        return ret;
5248}
5249
5250/**
5251 *  workqueue_set_unbound_cpumask - Set the low-level unbound cpumask
5252 *  @cpumask: the cpumask to set
5253 *
5254 *  The low-level workqueues cpumask is a global cpumask that limits
5255 *  the affinity of all unbound workqueues.  This function checks @cpumask
5256 *  and applies it to all unbound workqueues, updating all of their pwqs.
5257 *
5258 *  Return:     0       - Success
5259 *              -EINVAL - Invalid @cpumask
5260 *              -ENOMEM - Failed to allocate memory for attrs or pwqs.
5261 */
5262int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
5263{
5264        int ret = -EINVAL;
5265        cpumask_var_t saved_cpumask;
5266
5267        if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL))
5268                return -ENOMEM;
5269
5270        /*
5271         * Not excluding isolated cpus on purpose.
5272         * If the user wishes to include them, we allow that.
5273         */
5274        cpumask_and(cpumask, cpumask, cpu_possible_mask);
5275        if (!cpumask_empty(cpumask)) {
5276                apply_wqattrs_lock();
5277
5278                /* save the old wq_unbound_cpumask. */
5279                cpumask_copy(saved_cpumask, wq_unbound_cpumask);
5280
5281                /* update wq_unbound_cpumask at first and apply it to wqs. */
5282                cpumask_copy(wq_unbound_cpumask, cpumask);
5283                ret = workqueue_apply_unbound_cpumask();
5284
5285                /* restore the wq_unbound_cpumask when failed. */
5286                if (ret < 0)
5287                        cpumask_copy(wq_unbound_cpumask, saved_cpumask);
5288
5289                apply_wqattrs_unlock();
5290        }
5291
5292        free_cpumask_var(saved_cpumask);
5293        return ret;
5294}
5295
5296#ifdef CONFIG_SYSFS
5297/*
5298 * Workqueues with the WQ_SYSFS flag set are visible to userland via
5299 * /sys/bus/workqueue/devices/WQ_NAME.  All visible workqueues have the
5300 * following attributes.
5301 *
5302 *  per_cpu     RO bool : whether the workqueue is per-cpu or unbound
5303 *  max_active  RW int  : maximum number of in-flight work items
5304 *
5305 * Unbound workqueues have the following extra attributes.
5306 *
5307 *  pool_ids    RO int  : the associated pool IDs for each node
5308 *  nice        RW int  : nice value of the workers
5309 *  cpumask     RW mask : bitmask of allowed CPUs for the workers
5310 *  numa        RW bool : whether to enable NUMA affinity
5311 */
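
    /*
     * Example (illustrative): a workqueue opts into this interface with
     * WQ_SYSFS and then appears under /sys/bus/workqueue/devices/<name>.
     *
     *	wq = alloc_workqueue("foo", WQ_UNBOUND | WQ_SYSFS, 0);
     */
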
5312struct wq_device {
5313        struct workqueue_struct         *wq;
5314        struct device                   dev;
5315};
5316
5317static struct workqueue_struct *dev_to_wq(struct device *dev)
5318{
5319        struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
5320
5321        return wq_dev->wq;
5322}
5323
5324static ssize_t per_cpu_show(struct device *dev, struct device_attribute *attr,
5325                            char *buf)
5326{
5327        struct workqueue_struct *wq = dev_to_wq(dev);
5328
5329        return scnprintf(buf, PAGE_SIZE, "%d\n", (bool)!(wq->flags & WQ_UNBOUND));
5330}
5331static DEVICE_ATTR_RO(per_cpu);
5332
5333static ssize_t max_active_show(struct device *dev,
5334                               struct device_attribute *attr, char *buf)
5335{
5336        struct workqueue_struct *wq = dev_to_wq(dev);
5337
5338        return scnprintf(buf, PAGE_SIZE, "%d\n", wq->saved_max_active);
5339}
5340
5341static ssize_t max_active_store(struct device *dev,
5342                                struct device_attribute *attr, const char *buf,
5343                                size_t count)
5344{
5345        struct workqueue_struct *wq = dev_to_wq(dev);
5346        int val;
5347
5348        if (sscanf(buf, "%d", &val) != 1 || val <= 0)
5349                return -EINVAL;
5350
5351        workqueue_set_max_active(wq, val);
5352        return count;
5353}
5354static DEVICE_ATTR_RW(max_active);
5355
5356static struct attribute *wq_sysfs_attrs[] = {
5357        &dev_attr_per_cpu.attr,
5358        &dev_attr_max_active.attr,
5359        NULL,
5360};
5361ATTRIBUTE_GROUPS(wq_sysfs);
5362
5363static ssize_t wq_pool_ids_show(struct device *dev,
5364                                struct device_attribute *attr, char *buf)
5365{
5366        struct workqueue_struct *wq = dev_to_wq(dev);
5367        const char *delim = "";
5368        int node, written = 0;
5369
5370        get_online_cpus();
5371        rcu_read_lock();
5372        for_each_node(node) {
5373                written += scnprintf(buf + written, PAGE_SIZE - written,
5374                                     "%s%d:%d", delim, node,
5375                                     unbound_pwq_by_node(wq, node)->pool->id);
5376                delim = " ";
5377        }
5378        written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
5379        rcu_read_unlock();
5380        put_online_cpus();
5381
5382        return written;
5383}
5384
5385static ssize_t wq_nice_show(struct device *dev, struct device_attribute *attr,
5386                            char *buf)
5387{
5388        struct workqueue_struct *wq = dev_to_wq(dev);
5389        int written;
5390
5391        mutex_lock(&wq->mutex);
5392        written = scnprintf(buf, PAGE_SIZE, "%d\n", wq->unbound_attrs->nice);
5393        mutex_unlock(&wq->mutex);
5394
5395        return written;
5396}
5397
5398/* prepare workqueue_attrs for sysfs store operations */
5399static struct workqueue_attrs *wq_sysfs_prep_attrs(struct workqueue_struct *wq)
5400{
5401        struct workqueue_attrs *attrs;
5402
5403        lockdep_assert_held(&wq_pool_mutex);
5404
5405        attrs = alloc_workqueue_attrs();
5406        if (!attrs)
5407                return NULL;
5408
5409        copy_workqueue_attrs(attrs, wq->unbound_attrs);
5410        return attrs;
5411}
5412
5413static ssize_t wq_nice_store(struct device *dev, struct device_attribute *attr,
5414                             const char *buf, size_t count)
5415{
5416        struct workqueue_struct *wq = dev_to_wq(dev);
5417        struct workqueue_attrs *attrs;
5418        int ret = -ENOMEM;
5419
5420        apply_wqattrs_lock();
5421
5422        attrs = wq_sysfs_prep_attrs(wq);
5423        if (!attrs)
5424                goto out_unlock;
5425
5426        if (sscanf(buf, "%d", &attrs->nice) == 1 &&
5427            attrs->nice >= MIN_NICE && attrs->nice <= MAX_NICE)
5428                ret = apply_workqueue_attrs_locked(wq, attrs);
5429        else
5430                ret = -EINVAL;
5431
5432out_unlock:
5433        apply_wqattrs_unlock();
5434        free_workqueue_attrs(attrs);
5435        return ret ?: count;
5436}
5437
5438static ssize_t wq_cpumask_show(struct device *dev,
5439                               struct device_attribute *attr, char *buf)
5440{
5441        struct workqueue_struct *wq = dev_to_wq(dev);
5442        int written;
5443
5444        mutex_lock(&wq->mutex);
5445        written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
5446                            cpumask_pr_args(wq->unbound_attrs->cpumask));
5447        mutex_unlock(&wq->mutex);
5448        return written;
5449}
5450
5451static ssize_t wq_cpumask_store(struct device *dev,
5452                                struct device_attribute *attr,
5453                                const char *buf, size_t count)
5454{
5455        struct workqueue_struct *wq = dev_to_wq(dev);
5456        struct workqueue_attrs *attrs;
5457        int ret = -ENOMEM;
5458
5459        apply_wqattrs_lock();
5460
5461        attrs = wq_sysfs_prep_attrs(wq);
5462        if (!attrs)
5463                goto out_unlock;
5464
5465        ret = cpumask_parse(buf, attrs->cpumask);
5466        if (!ret)
5467                ret = apply_workqueue_attrs_locked(wq, attrs);
5468
5469out_unlock:
5470        apply_wqattrs_unlock();
5471        free_workqueue_attrs(attrs);
5472        return ret ?: count;
5473}
5474
5475static ssize_t wq_numa_show(struct device *dev, struct device_attribute *attr,
5476                            char *buf)
5477{
5478        struct workqueue_struct *wq = dev_to_wq(dev);
5479        int written;
5480
5481        mutex_lock(&wq->mutex);
5482        written = scnprintf(buf, PAGE_SIZE, "%d\n",
5483                            !wq->unbound_attrs->no_numa);
5484        mutex_unlock(&wq->mutex);
5485
5486        return written;
5487}
5488
5489static ssize_t wq_numa_store(struct device *dev, struct device_attribute *attr,
5490                             const char *buf, size_t count)
5491{
5492        struct workqueue_struct *wq = dev_to_wq(dev);
5493        struct workqueue_attrs *attrs;
5494        int v, ret = -ENOMEM;
5495
5496        apply_wqattrs_lock();
5497
5498        attrs = wq_sysfs_prep_attrs(wq);
5499        if (!attrs)
5500                goto out_unlock;
5501
5502        ret = -EINVAL;
5503        if (sscanf(buf, "%d", &v) == 1) {
5504                attrs->no_numa = !v;
5505                ret = apply_workqueue_attrs_locked(wq, attrs);
5506        }
5507
5508out_unlock:
5509        apply_wqattrs_unlock();
5510        free_workqueue_attrs(attrs);
5511        return ret ?: count;
5512}
5513
5514static struct device_attribute wq_sysfs_unbound_attrs[] = {
5515        __ATTR(pool_ids, 0444, wq_pool_ids_show, NULL),
5516        __ATTR(nice, 0644, wq_nice_show, wq_nice_store),
5517        __ATTR(cpumask, 0644, wq_cpumask_show, wq_cpumask_store),
5518        __ATTR(numa, 0644, wq_numa_show, wq_numa_store),
5519        __ATTR_NULL,
5520};
5521
5522static struct bus_type wq_subsys = {
5523        .name                           = "workqueue",
5524        .dev_groups                     = wq_sysfs_groups,
5525};
5526
5527static ssize_t wq_unbound_cpumask_show(struct device *dev,
5528                struct device_attribute *attr, char *buf)
5529{
5530        int written;
5531
5532        mutex_lock(&wq_pool_mutex);
5533        written = scnprintf(buf, PAGE_SIZE, "%*pb\n",
5534                            cpumask_pr_args(wq_unbound_cpumask));
5535        mutex_unlock(&wq_pool_mutex);
5536
5537        return written;
5538}
5539
5540static ssize_t wq_unbound_cpumask_store(struct device *dev,
5541                struct device_attribute *attr, const char *buf, size_t count)
5542{
5543        cpumask_var_t cpumask;
5544        int ret;
5545
5546        if (!zalloc_cpumask_var(&cpumask, GFP_KERNEL))
5547                return -ENOMEM;
5548
5549        ret = cpumask_parse(buf, cpumask);
5550        if (!ret)
5551                ret = workqueue_set_unbound_cpumask(cpumask);
5552
5553        free_cpumask_var(cpumask);
5554        return ret ? ret : count;
5555}
5556
5557static struct device_attribute wq_sysfs_cpumask_attr =
5558        __ATTR(cpumask, 0644, wq_unbound_cpumask_show,
5559               wq_unbound_cpumask_store);
5560
5561static int __init wq_sysfs_init(void)
5562{
5563        int err;
5564
5565        err = subsys_virtual_register(&wq_subsys, NULL);
5566        if (err)
5567                return err;
5568
5569        return device_create_file(wq_subsys.dev_root, &wq_sysfs_cpumask_attr);
5570}
5571core_initcall(wq_sysfs_init);
5572
5573static void wq_device_release(struct device *dev)
5574{
5575        struct wq_device *wq_dev = container_of(dev, struct wq_device, dev);
5576
5577        kfree(wq_dev);
5578}
5579
5580/**
5581 * workqueue_sysfs_register - make a workqueue visible in sysfs
5582 * @wq: the workqueue to register
5583 *
5584 * Expose @wq in sysfs under /sys/bus/workqueue/devices.
5585 * alloc_workqueue*() automatically calls this function if WQ_SYSFS is set
5586 * which is the preferred method.
5587 *
5588 * A workqueue user should use this function directly iff it wants to apply
5589 * workqueue_attrs before making the workqueue visible in sysfs; otherwise,
5590 * apply_workqueue_attrs() may race against userland updating the
5591 * attributes.
5592 *
5593 * Return: 0 on success, -errno on failure.
5594 */
5595int workqueue_sysfs_register(struct workqueue_struct *wq)
5596{
5597        struct wq_device *wq_dev;
5598        int ret;
5599
5600        /*
5601         * Adjusting max_active or creating new pwqs by applying
5602         * attributes breaks ordering guarantee.  Disallow exposing ordered
5603         * workqueues.
5604         */
5605        if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
5606                return -EINVAL;
5607
5608        wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
5609        if (!wq_dev)
5610                return -ENOMEM;
5611
5612        wq_dev->wq = wq;
5613        wq_dev->dev.bus = &wq_subsys;
5614        wq_dev->dev.release = wq_device_release;
5615        dev_set_name(&wq_dev->dev, "%s", wq->name);
5616
5617        /*
5618         * unbound_attrs are created separately.  Suppress uevent until
5619         * everything is ready.
5620         */
5621        dev_set_uevent_suppress(&wq_dev->dev, true);
5622
5623        ret = device_register(&wq_dev->dev);
5624        if (ret) {
5625                put_device(&wq_dev->dev);
5626                wq->wq_dev = NULL;
5627                return ret;
5628        }
5629
5630        if (wq->flags & WQ_UNBOUND) {
5631                struct device_attribute *attr;
5632
5633                for (attr = wq_sysfs_unbound_attrs; attr->attr.name; attr++) {
5634                        ret = device_create_file(&wq_dev->dev, attr);
5635                        if (ret) {
5636                                device_unregister(&wq_dev->dev);
5637                                wq->wq_dev = NULL;
5638                                return ret;
5639                        }
5640                }
5641        }
5642
5643        dev_set_uevent_suppress(&wq_dev->dev, false);
5644        kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
5645        return 0;
5646}
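
    /*
     * Example (illustrative sketch; error handling and freeing omitted):
     * per the note above, a user that wants to pre-configure attributes
     * before exposing the workqueue skips WQ_SYSFS and registers manually.
     * "foo" is hypothetical.
     *
     *	wq = alloc_workqueue("foo", WQ_UNBOUND, 0);
     *	attrs = alloc_workqueue_attrs();
     *	attrs->nice = -10;
     *	apply_workqueue_attrs(wq, attrs);
     *	workqueue_sysfs_register(wq);
     */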
5647
5648/**
5649 * workqueue_sysfs_unregister - undo workqueue_sysfs_register()
5650 * @wq: the workqueue to unregister
5651 *
5652 * If @wq is registered to sysfs by workqueue_sysfs_register(), unregister.
5653 */
5654static void workqueue_sysfs_unregister(struct workqueue_struct *wq)
5655{
5656        struct wq_device *wq_dev = wq->wq_dev;
5657
5658        if (!wq->wq_dev)
5659                return;
5660
5661        wq->wq_dev = NULL;
5662        device_unregister(&wq_dev->dev);
5663}
5664#else   /* CONFIG_SYSFS */
5665static void workqueue_sysfs_unregister(struct workqueue_struct *wq)     { }
5666#endif  /* CONFIG_SYSFS */
5667
5668/*
5669 * Workqueue watchdog.
5670 *
5671 * Stalls may be caused by various bugs - a missing WQ_MEM_RECLAIM, an illegal
5672 * flush dependency, a concurrency managed work item which stays RUNNING
5673 * indefinitely, etc.  Workqueue stalls can be very difficult to debug as the
5674 * usual warning mechanisms don't trigger and internal workqueue state is
5675 * largely opaque.
5676 *
5677 * Workqueue watchdog monitors all worker pools periodically and dumps
5678 * state if some pools failed to make forward progress for a while, where
5679 * forward progress is defined as the first item on ->worklist changing.
5680 *
5681 * This mechanism is controlled through the kernel parameter
5682 * "workqueue.watchdog_thresh" which can be updated at runtime through the
5683 * corresponding sysfs parameter file.
5684 */
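
    /*
     * For example (illustrative), a 60 second threshold can be set at boot
     * with "workqueue.watchdog_thresh=60" or at runtime by writing to
     * /sys/module/workqueue/parameters/watchdog_thresh.
     */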
5685#ifdef CONFIG_WQ_WATCHDOG
5686
5687static unsigned long wq_watchdog_thresh = 30;
5688static struct timer_list wq_watchdog_timer;
5689
5690static unsigned long wq_watchdog_touched = INITIAL_JIFFIES;
5691static DEFINE_PER_CPU(unsigned long, wq_watchdog_touched_cpu) = INITIAL_JIFFIES;
5692
5693static void wq_watchdog_reset_touched(void)
5694{
5695        int cpu;
5696
5697        wq_watchdog_touched = jiffies;
5698        for_each_possible_cpu(cpu)
5699                per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
5700}
5701
5702static void wq_watchdog_timer_fn(struct timer_list *unused)
5703{
5704        unsigned long thresh = READ_ONCE(wq_watchdog_thresh) * HZ;
5705        bool lockup_detected = false;
5706        struct worker_pool *pool;
5707        int pi;
5708
5709        if (!thresh)
5710                return;
5711
5712        rcu_read_lock();
5713
5714        for_each_pool(pool, pi) {
5715                unsigned long pool_ts, touched, ts;
5716
5717                if (list_empty(&pool->worklist))
5718                        continue;
5719
5720                /* get the latest of pool and touched timestamps */
5721                pool_ts = READ_ONCE(pool->watchdog_ts);
5722                touched = READ_ONCE(wq_watchdog_touched);
5723
5724                if (time_after(pool_ts, touched))
5725                        ts = pool_ts;
5726                else
5727                        ts = touched;
5728
5729                if (pool->cpu >= 0) {
5730                        unsigned long cpu_touched =
5731                                READ_ONCE(per_cpu(wq_watchdog_touched_cpu,
5732                                                  pool->cpu));
5733                        if (time_after(cpu_touched, ts))
5734                                ts = cpu_touched;
5735                }
5736
5737                /* did we stall? */
5738                if (time_after(jiffies, ts + thresh)) {
5739                        lockup_detected = true;
5740                        pr_emerg("BUG: workqueue lockup - pool");
5741                        pr_cont_pool_info(pool);
5742                        pr_cont(" stuck for %us!\n",
5743                                jiffies_to_msecs(jiffies - pool_ts) / 1000);
5744                }
5745        }
5746
5747        rcu_read_unlock();
5748
5749        if (lockup_detected)
5750                show_workqueue_state();
5751
5752        wq_watchdog_reset_touched();
5753        mod_timer(&wq_watchdog_timer, jiffies + thresh);
5754}
5755
5756notrace void wq_watchdog_touch(int cpu)
5757{
5758        if (cpu >= 0)
5759                per_cpu(wq_watchdog_touched_cpu, cpu) = jiffies;
5760        else
5761                wq_watchdog_touched = jiffies;
5762}
5763
5764static void wq_watchdog_set_thresh(unsigned long thresh)
5765{
5766        wq_watchdog_thresh = 0;
5767        del_timer_sync(&wq_watchdog_timer);
5768
5769        if (thresh) {
5770                wq_watchdog_thresh = thresh;
5771                wq_watchdog_reset_touched();
5772                mod_timer(&wq_watchdog_timer, jiffies + thresh * HZ);
5773        }
5774}
5775
5776static int wq_watchdog_param_set_thresh(const char *val,
5777                                        const struct kernel_param *kp)
5778{
5779        unsigned long thresh;
5780        int ret;
5781
5782        ret = kstrtoul(val, 0, &thresh);
5783        if (ret)
5784                return ret;
5785
5786        if (system_wq)
5787                wq_watchdog_set_thresh(thresh);
5788        else
5789                wq_watchdog_thresh = thresh;
5790
5791        return 0;
5792}
5793
5794static const struct kernel_param_ops wq_watchdog_thresh_ops = {
5795        .set    = wq_watchdog_param_set_thresh,
5796        .get    = param_get_ulong,
5797};
5798
5799module_param_cb(watchdog_thresh, &wq_watchdog_thresh_ops, &wq_watchdog_thresh,
5800                0644);
5801
5802static void wq_watchdog_init(void)
5803{
5804        timer_setup(&wq_watchdog_timer, wq_watchdog_timer_fn, TIMER_DEFERRABLE);
5805        wq_watchdog_set_thresh(wq_watchdog_thresh);
5806}
5807
5808#else   /* CONFIG_WQ_WATCHDOG */
5809
5810static inline void wq_watchdog_init(void) { }
5811
5812#endif  /* CONFIG_WQ_WATCHDOG */
5813
5814static void __init wq_numa_init(void)
5815{
5816        cpumask_var_t *tbl;
5817        int node, cpu;
5818
5819        if (num_possible_nodes() <= 1)
5820                return;
5821
5822        if (wq_disable_numa) {
5823                pr_info("workqueue: NUMA affinity support disabled\n");
5824                return;
5825        }
5826
5827        wq_update_unbound_numa_attrs_buf = alloc_workqueue_attrs();
5828        BUG_ON(!wq_update_unbound_numa_attrs_buf);
5829
5830        /*
5831         * We want masks of possible CPUs of each node, which aren't readily
5832         * available.  Build them from cpu_to_node() which should have been
5833         * fully initialized by now.
5834         */
5835        tbl = kcalloc(nr_node_ids, sizeof(tbl[0]), GFP_KERNEL);
5836        BUG_ON(!tbl);
5837
5838        for_each_node(node)
5839                BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
5840                                node_online(node) ? node : NUMA_NO_NODE));
5841
5842        for_each_possible_cpu(cpu) {
5843                node = cpu_to_node(cpu);
5844                if (WARN_ON(node == NUMA_NO_NODE)) {
5845                        pr_warn("workqueue: NUMA node mapping not available for cpu%d, disabling NUMA support\n", cpu);
5846                        /* happens iff arch is bonkers, let's just proceed */
5847                        return;
5848                }
5849                cpumask_set_cpu(cpu, tbl[node]);
5850        }
5851
5852        wq_numa_possible_cpumask = tbl;
5853        wq_numa_enabled = true;
5854}
5855
5856/**
5857 * workqueue_init_early - early init for workqueue subsystem
5858 *
5859 * This is the first half of two-staged workqueue subsystem initialization
5860 * and invoked as soon as the bare basics - memory allocation, cpumasks and
5861 * idr are up.  It sets up all the data structures and system workqueues
5862 * and allows early boot code to create workqueues and queue/cancel work
5863 * items.  Actual work item execution starts only after kthreads can be
5864 * created and scheduled right before early initcalls.
5865 */
5866int __init workqueue_init_early(void)
5867{
5868        int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
5869        int hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
5870        int i, cpu;
5871
5872        WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
5873
5874        BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
5875        cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(hk_flags));
5876
5877        pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
5878
5879        /* initialize CPU pools */
5880        for_each_possible_cpu(cpu) {
5881                struct worker_pool *pool;
5882
5883                i = 0;
5884                for_each_cpu_worker_pool(pool, cpu) {
5885                        BUG_ON(init_worker_pool(pool));
5886                        pool->cpu = cpu;
5887                        cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
5888                        pool->attrs->nice = std_nice[i++];
5889                        pool->node = cpu_to_node(cpu);
5890
5891                        /* alloc pool ID */
5892                        mutex_lock(&wq_pool_mutex);
5893                        BUG_ON(worker_pool_assign_id(pool));
5894                        mutex_unlock(&wq_pool_mutex);
5895                }
5896        }
5897
5898        /* create default unbound and ordered wq attrs */
5899        for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
5900                struct workqueue_attrs *attrs;
5901
5902                BUG_ON(!(attrs = alloc_workqueue_attrs()));
5903                attrs->nice = std_nice[i];
5904                unbound_std_wq_attrs[i] = attrs;
5905
5906                /*
5907                 * An ordered wq should have only one pwq as ordering is
5908                 * guaranteed by max_active which is enforced by pwqs.
5909                 * Turn off NUMA so that dfl_pwq is used for all nodes.
5910                 */
5911                BUG_ON(!(attrs = alloc_workqueue_attrs()));
5912                attrs->nice = std_nice[i];
5913                attrs->no_numa = true;
5914                ordered_wq_attrs[i] = attrs;
5915        }
5916
5917        system_wq = alloc_workqueue("events", 0, 0);
5918        system_highpri_wq = alloc_workqueue("events_highpri", WQ_HIGHPRI, 0);
5919        system_long_wq = alloc_workqueue("events_long", 0, 0);
5920        system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
5921                                            WQ_UNBOUND_MAX_ACTIVE);
5922        system_freezable_wq = alloc_workqueue("events_freezable",
5923                                              WQ_FREEZABLE, 0);
5924        system_power_efficient_wq = alloc_workqueue("events_power_efficient",
5925                                              WQ_POWER_EFFICIENT, 0);
5926        system_freezable_power_efficient_wq = alloc_workqueue("events_freezable_power_efficient",
5927                                              WQ_FREEZABLE | WQ_POWER_EFFICIENT,
5928                                              0);
5929        BUG_ON(!system_wq || !system_highpri_wq || !system_long_wq ||
5930               !system_unbound_wq || !system_freezable_wq ||
5931               !system_power_efficient_wq ||
5932               !system_freezable_power_efficient_wq);
5933
5934        return 0;
5935}
5936
5937/**
5938 * workqueue_init - bring workqueue subsystem fully online
5939 *
5940 * This is the latter half of two-staged workqueue subsystem initialization
5941 * and invoked as soon as kthreads can be created and scheduled.
5942 * Workqueues have been created and work items queued on them, but there
5943 * are no kworkers executing the work items yet.  Populate the worker pools
5944 * with the initial workers and enable future kworker creations.
5945 */
5946int __init workqueue_init(void)
5947{
5948        struct workqueue_struct *wq;
5949        struct worker_pool *pool;
5950        int cpu, bkt;
5951
5952        /*
5953         * It'd be simpler to initialize NUMA in workqueue_init_early() but
5954         * CPU to node mapping may not be available that early on some
5955         * archs such as power and arm64.  As the per-cpu pools created
5956         * earlier could be missing their node hint, and unbound pools their
5957         * NUMA affinity, fix them up.
5958         *
5959         * Also, while iterating workqueues, create rescuers if requested.
5960         */
5961        wq_numa_init();
5962
5963        mutex_lock(&wq_pool_mutex);
5964
5965        for_each_possible_cpu(cpu) {
5966                for_each_cpu_worker_pool(pool, cpu) {
5967                        pool->node = cpu_to_node(cpu);
5968                }
5969        }
5970
5971        list_for_each_entry(wq, &workqueues, list) {
5972                wq_update_unbound_numa(wq, smp_processor_id(), true);
5973                WARN(init_rescuer(wq),
5974                     "workqueue: failed to create early rescuer for %s",
5975                     wq->name);
5976        }
5977
5978        mutex_unlock(&wq_pool_mutex);
5979
5980        /* create the initial workers */
5981        for_each_online_cpu(cpu) {
5982                for_each_cpu_worker_pool(pool, cpu) {
5983                        pool->flags &= ~POOL_DISASSOCIATED;
5984                        BUG_ON(!create_worker(pool));
5985                }
5986        }
5987
5988        hash_for_each(unbound_pool_hash, bkt, pool, hash_node)
5989                BUG_ON(!create_worker(pool));
5990
5991        wq_online = true;
5992        wq_watchdog_init();
5993
5994        return 0;
5995}
5996