linux/kernel/workqueue.c
   1/*
   2 * kernel/workqueue.c - generic async execution with shared worker pool
   3 *
   4 * Copyright (C) 2002           Ingo Molnar
   5 *
   6 *   Derived from the taskqueue/keventd code by:
   7 *     David Woodhouse <dwmw2@infradead.org>
   8 *     Andrew Morton
   9 *     Kai Petzke <wpp@marie.physik.tu-berlin.de>
  10 *     Theodore Ts'o <tytso@mit.edu>
  11 *
  12 * Made to use alloc_percpu by Christoph Lameter.
  13 *
  14 * Copyright (C) 2010           SUSE Linux Products GmbH
  15 * Copyright (C) 2010           Tejun Heo <tj@kernel.org>
  16 *
   17 * This is the generic async execution mechanism.  Work items are
  18 * executed in process context.  The worker pool is shared and
  19 * automatically managed.  There is one worker pool for each CPU and
  20 * one extra for works which are better served by workers which are
  21 * not bound to any specific CPU.
  22 *
  23 * Please read Documentation/workqueue.txt for details.
  24 */
  25
  26#include <linux/module.h>
  27#include <linux/kernel.h>
  28#include <linux/sched.h>
  29#include <linux/init.h>
  30#include <linux/signal.h>
  31#include <linux/completion.h>
  32#include <linux/workqueue.h>
  33#include <linux/slab.h>
  34#include <linux/cpu.h>
  35#include <linux/notifier.h>
  36#include <linux/kthread.h>
  37#include <linux/hardirq.h>
  38#include <linux/mempolicy.h>
  39#include <linux/freezer.h>
  40#include <linux/kallsyms.h>
  41#include <linux/debug_locks.h>
  42#include <linux/lockdep.h>
  43#include <linux/idr.h>
  44
  45#include "workqueue_sched.h"
  46
  47enum {
  48        /* global_cwq flags */
  49        GCWQ_MANAGE_WORKERS     = 1 << 0,       /* need to manage workers */
  50        GCWQ_MANAGING_WORKERS   = 1 << 1,       /* managing workers */
  51        GCWQ_DISASSOCIATED      = 1 << 2,       /* cpu can't serve workers */
  52        GCWQ_FREEZING           = 1 << 3,       /* freeze in progress */
  53        GCWQ_HIGHPRI_PENDING    = 1 << 4,       /* highpri works on queue */
  54
  55        /* worker flags */
  56        WORKER_STARTED          = 1 << 0,       /* started */
  57        WORKER_DIE              = 1 << 1,       /* die die die */
  58        WORKER_IDLE             = 1 << 2,       /* is idle */
  59        WORKER_PREP             = 1 << 3,       /* preparing to run works */
  60        WORKER_ROGUE            = 1 << 4,       /* not bound to any cpu */
  61        WORKER_REBIND           = 1 << 5,       /* mom is home, come back */
  62        WORKER_CPU_INTENSIVE    = 1 << 6,       /* cpu intensive */
  63        WORKER_UNBOUND          = 1 << 7,       /* worker is unbound */
  64
  65        WORKER_NOT_RUNNING      = WORKER_PREP | WORKER_ROGUE | WORKER_REBIND |
  66                                  WORKER_CPU_INTENSIVE | WORKER_UNBOUND,
  67
  68        /* gcwq->trustee_state */
  69        TRUSTEE_START           = 0,            /* start */
  70        TRUSTEE_IN_CHARGE       = 1,            /* trustee in charge of gcwq */
  71        TRUSTEE_BUTCHER         = 2,            /* butcher workers */
  72        TRUSTEE_RELEASE         = 3,            /* release workers */
  73        TRUSTEE_DONE            = 4,            /* trustee is done */
  74
  75        BUSY_WORKER_HASH_ORDER  = 6,            /* 64 pointers */
  76        BUSY_WORKER_HASH_SIZE   = 1 << BUSY_WORKER_HASH_ORDER,
  77        BUSY_WORKER_HASH_MASK   = BUSY_WORKER_HASH_SIZE - 1,
  78
  79        MAX_IDLE_WORKERS_RATIO  = 4,            /* 1/4 of busy can be idle */
  80        IDLE_WORKER_TIMEOUT     = 300 * HZ,     /* keep idle ones for 5 mins */
  81
  82        MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
  83                                                /* call for help after 10ms
  84                                                   (min two ticks) */
  85        MAYDAY_INTERVAL         = HZ / 10,      /* and then every 100ms */
   86        CREATE_COOLDOWN         = HZ,           /* time to breathe after fail */
  87        TRUSTEE_COOLDOWN        = HZ / 10,      /* for trustee draining */
  88
  89        /*
   90         * Rescue workers are used only in emergencies and shared by
  91         * all cpus.  Give -20.
  92         */
  93        RESCUER_NICE_LEVEL      = -20,
  94};
  95
  96/*
  97 * Structure fields follow one of the following exclusion rules.
  98 *
  99 * I: Modifiable by initialization/destruction paths and read-only for
 100 *    everyone else.
 101 *
 102 * P: Preemption protected.  Disabling preemption is enough and should
 103 *    only be modified and accessed from the local cpu.
 104 *
 105 * L: gcwq->lock protected.  Access with gcwq->lock held.
 106 *
 107 * X: During normal operation, modification requires gcwq->lock and
 108 *    should be done only from local cpu.  Either disabling preemption
 109 *    on local cpu or grabbing gcwq->lock is enough for read access.
 110 *    If GCWQ_DISASSOCIATED is set, it's identical to L.
 111 *
 112 * F: wq->flush_mutex protected.
 113 *
 114 * W: workqueue_lock protected.
 115 */
 116
 117struct global_cwq;
 118
 119/*
 120 * The poor guys doing the actual heavy lifting.  All on-duty workers
 121 * are either serving the manager role, on idle list or on busy hash.
 122 */
 123struct worker {
 124        /* on idle list while idle, on busy hash table while busy */
 125        union {
 126                struct list_head        entry;  /* L: while idle */
 127                struct hlist_node       hentry; /* L: while busy */
 128        };
 129
 130        struct work_struct      *current_work;  /* L: work being processed */
 131        struct cpu_workqueue_struct *current_cwq; /* L: current_work's cwq */
 132        struct list_head        scheduled;      /* L: scheduled works */
 133        struct task_struct      *task;          /* I: worker task */
 134        struct global_cwq       *gcwq;          /* I: the associated gcwq */
 135        /* 64 bytes boundary on 64bit, 32 on 32bit */
 136        unsigned long           last_active;    /* L: last active timestamp */
 137        unsigned int            flags;          /* X: flags */
 138        int                     id;             /* I: worker id */
 139        struct work_struct      rebind_work;    /* L: rebind worker to cpu */
 140};
 141
 142/*
 143 * Global per-cpu workqueue.  There's one and only one for each cpu
 144 * and all works are queued and processed here regardless of their
 145 * target workqueues.
 146 */
 147struct global_cwq {
 148        spinlock_t              lock;           /* the gcwq lock */
 149        struct list_head        worklist;       /* L: list of pending works */
 150        unsigned int            cpu;            /* I: the associated cpu */
 151        unsigned int            flags;          /* L: GCWQ_* flags */
 152
 153        int                     nr_workers;     /* L: total number of workers */
 154        int                     nr_idle;        /* L: currently idle ones */
 155
 156        /* workers are chained either in the idle_list or busy_hash */
 157        struct list_head        idle_list;      /* X: list of idle workers */
 158        struct hlist_head       busy_hash[BUSY_WORKER_HASH_SIZE];
 159                                                /* L: hash of busy workers */
 160
 161        struct timer_list       idle_timer;     /* L: worker idle timeout */
  162        struct timer_list       mayday_timer;   /* L: SOS timer for workers */
 163
 164        struct ida              worker_ida;     /* L: for worker IDs */
 165
 166        struct task_struct      *trustee;       /* L: for gcwq shutdown */
 167        unsigned int            trustee_state;  /* L: trustee state */
 168        wait_queue_head_t       trustee_wait;   /* trustee wait */
 169        struct worker           *first_idle;    /* L: first idle worker */
 170} ____cacheline_aligned_in_smp;
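
/*
 * A minimal sketch of the "L:" locking rule documented above: fields
 * such as gcwq->worklist are only touched with gcwq->lock held and
 * local irqs disabled.  The helper below is purely illustrative (its
 * name and the pr_debug() output are not part of the workqueue code);
 * it simply mirrors the pattern the real helpers later in this file
 * follow.
 */
static void __maybe_unused example_walk_worklist(struct global_cwq *gcwq)
{
        struct work_struct *work;

        spin_lock_irq(&gcwq->lock);
        list_for_each_entry(work, &gcwq->worklist, entry)
                pr_debug("pending work %p\n", work);
        spin_unlock_irq(&gcwq->lock);
}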
 171
 172/*
 173 * The per-CPU workqueue.  The lower WORK_STRUCT_FLAG_BITS of
 174 * work_struct->data are used for flags and thus cwqs need to be
 175 * aligned at two's power of the number of flag bits.
 176 */
 177struct cpu_workqueue_struct {
 178        struct global_cwq       *gcwq;          /* I: the associated gcwq */
 179        struct workqueue_struct *wq;            /* I: the owning workqueue */
 180        int                     work_color;     /* L: current color */
 181        int                     flush_color;    /* L: flushing color */
 182        int                     nr_in_flight[WORK_NR_COLORS];
 183                                                /* L: nr of in_flight works */
 184        int                     nr_active;      /* L: nr of active works */
 185        int                     max_active;     /* L: max active works */
 186        struct list_head        delayed_works;  /* L: delayed works */
 187};
 188
 189/*
 190 * Structure used to wait for workqueue flush.
 191 */
 192struct wq_flusher {
 193        struct list_head        list;           /* F: list of flushers */
 194        int                     flush_color;    /* F: flush color waiting for */
 195        struct completion       done;           /* flush completion */
 196};
 197
 198/*
 199 * All cpumasks are assumed to be always set on UP and thus can't be
 200 * used to determine whether there's something to be done.
 201 */
 202#ifdef CONFIG_SMP
 203typedef cpumask_var_t mayday_mask_t;
 204#define mayday_test_and_set_cpu(cpu, mask)      \
 205        cpumask_test_and_set_cpu((cpu), (mask))
 206#define mayday_clear_cpu(cpu, mask)             cpumask_clear_cpu((cpu), (mask))
 207#define for_each_mayday_cpu(cpu, mask)          for_each_cpu((cpu), (mask))
 208#define alloc_mayday_mask(maskp, gfp)           zalloc_cpumask_var((maskp), (gfp))
 209#define free_mayday_mask(mask)                  free_cpumask_var((mask))
 210#else
 211typedef unsigned long mayday_mask_t;
 212#define mayday_test_and_set_cpu(cpu, mask)      test_and_set_bit(0, &(mask))
 213#define mayday_clear_cpu(cpu, mask)             clear_bit(0, &(mask))
 214#define for_each_mayday_cpu(cpu, mask)          if ((cpu) = 0, (mask))
 215#define alloc_mayday_mask(maskp, gfp)           true
 216#define free_mayday_mask(mask)                  do { } while (0)
 217#endif
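
/*
 * On UP the mask degenerates to a single bit in an unsigned long:
 * alloc_mayday_mask() is simply "true" because there is nothing to
 * allocate, and for_each_mayday_cpu() expands to an if-statement that
 * runs its body at most once with @cpu set to 0 whenever the mask is
 * non-zero.  Callers can therefore use the same code on SMP and UP.
 */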
 218
 219/*
 220 * The externally visible workqueue abstraction is an array of
 221 * per-CPU workqueues:
 222 */
 223struct workqueue_struct {
 224        unsigned int            flags;          /* I: WQ_* flags */
 225        union {
 226                struct cpu_workqueue_struct __percpu    *pcpu;
 227                struct cpu_workqueue_struct             *single;
 228                unsigned long                           v;
 229        } cpu_wq;                               /* I: cwq's */
 230        struct list_head        list;           /* W: list of all workqueues */
 231
 232        struct mutex            flush_mutex;    /* protects wq flushing */
 233        int                     work_color;     /* F: current work color */
 234        int                     flush_color;    /* F: current flush color */
 235        atomic_t                nr_cwqs_to_flush; /* flush in progress */
 236        struct wq_flusher       *first_flusher; /* F: first flusher */
 237        struct list_head        flusher_queue;  /* F: flush waiters */
 238        struct list_head        flusher_overflow; /* F: flush overflow list */
 239
 240        mayday_mask_t           mayday_mask;    /* cpus requesting rescue */
 241        struct worker           *rescuer;       /* I: rescue worker */
 242
 243        int                     saved_max_active; /* W: saved cwq max_active */
 244        const char              *name;          /* I: workqueue name */
 245#ifdef CONFIG_LOCKDEP
 246        struct lockdep_map      lockdep_map;
 247#endif
 248};
 249
 250struct workqueue_struct *system_wq __read_mostly;
 251struct workqueue_struct *system_long_wq __read_mostly;
 252struct workqueue_struct *system_nrt_wq __read_mostly;
 253struct workqueue_struct *system_unbound_wq __read_mostly;
 254struct workqueue_struct *system_freezable_wq __read_mostly;
 255EXPORT_SYMBOL_GPL(system_wq);
 256EXPORT_SYMBOL_GPL(system_long_wq);
 257EXPORT_SYMBOL_GPL(system_nrt_wq);
 258EXPORT_SYMBOL_GPL(system_unbound_wq);
 259EXPORT_SYMBOL_GPL(system_freezable_wq);
 260
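/*
 * A minimal usage sketch for the system workqueues exported above.  The
 * work item and handler names below are illustrative only; queueing on
 * system_wq is what schedule_work() boils down to, while the other
 * system workqueues are picked when their specific guarantees (long
 * running, non-reentrant, unbound, freezable) are needed.
 */
static void example_work_fn(struct work_struct *work)
{
        pr_debug("example work ran on cpu %d\n", raw_smp_processor_id());
}

static DECLARE_WORK(example_work, example_work_fn);

static void __maybe_unused example_use_system_wq(void)
{
        /* no-op and returns 0 if example_work is already pending */
        queue_work(system_wq, &example_work);
}
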
 261#define CREATE_TRACE_POINTS
 262#include <trace/events/workqueue.h>
 263
 264#define for_each_busy_worker(worker, i, pos, gcwq)                      \
 265        for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)                     \
 266                hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
 267
 268static inline int __next_gcwq_cpu(int cpu, const struct cpumask *mask,
 269                                  unsigned int sw)
 270{
 271        if (cpu < nr_cpu_ids) {
 272                if (sw & 1) {
 273                        cpu = cpumask_next(cpu, mask);
 274                        if (cpu < nr_cpu_ids)
 275                                return cpu;
 276                }
 277                if (sw & 2)
 278                        return WORK_CPU_UNBOUND;
 279        }
 280        return WORK_CPU_NONE;
 281}
 282
 283static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
 284                                struct workqueue_struct *wq)
 285{
 286        return __next_gcwq_cpu(cpu, mask, !(wq->flags & WQ_UNBOUND) ? 1 : 2);
 287}
 288
 289/*
 290 * CPU iterators
 291 *
 292 * An extra gcwq is defined for an invalid cpu number
 293 * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any
 294 * specific CPU.  The following iterators are similar to
  295 * for_each_*_cpu() iterators but also consider the unbound gcwq.
 296 *
 297 * for_each_gcwq_cpu()          : possible CPUs + WORK_CPU_UNBOUND
 298 * for_each_online_gcwq_cpu()   : online CPUs + WORK_CPU_UNBOUND
 299 * for_each_cwq_cpu()           : possible CPUs for bound workqueues,
 300 *                                WORK_CPU_UNBOUND for unbound workqueues
 301 */
 302#define for_each_gcwq_cpu(cpu)                                          \
 303        for ((cpu) = __next_gcwq_cpu(-1, cpu_possible_mask, 3);         \
 304             (cpu) < WORK_CPU_NONE;                                     \
 305             (cpu) = __next_gcwq_cpu((cpu), cpu_possible_mask, 3))
 306
 307#define for_each_online_gcwq_cpu(cpu)                                   \
 308        for ((cpu) = __next_gcwq_cpu(-1, cpu_online_mask, 3);           \
 309             (cpu) < WORK_CPU_NONE;                                     \
 310             (cpu) = __next_gcwq_cpu((cpu), cpu_online_mask, 3))
 311
 312#define for_each_cwq_cpu(cpu, wq)                                       \
 313        for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, (wq));        \
 314             (cpu) < WORK_CPU_NONE;                                     \
 315             (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, (wq)))
 316
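/*
 * A small illustrative walk (the function name is not part of the
 * workqueue code): unlike plain for_each_possible_cpu(), the iterator
 * below also visits WORK_CPU_UNBOUND as a final pseudo CPU so that the
 * unbound gcwq is covered as well.
 */
static void __maybe_unused example_walk_gcwq_cpus(void)
{
        unsigned int cpu;

        for_each_gcwq_cpu(cpu)
                pr_debug("gcwq for %s cpu %u\n",
                         cpu == WORK_CPU_UNBOUND ? "unbound" : "bound", cpu);
}
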
 317#ifdef CONFIG_DEBUG_OBJECTS_WORK
 318
 319static struct debug_obj_descr work_debug_descr;
 320
 321static void *work_debug_hint(void *addr)
 322{
 323        return ((struct work_struct *) addr)->func;
 324}
 325
 326/*
 327 * fixup_init is called when:
 328 * - an active object is initialized
 329 */
 330static int work_fixup_init(void *addr, enum debug_obj_state state)
 331{
 332        struct work_struct *work = addr;
 333
 334        switch (state) {
 335        case ODEBUG_STATE_ACTIVE:
 336                cancel_work_sync(work);
 337                debug_object_init(work, &work_debug_descr);
 338                return 1;
 339        default:
 340                return 0;
 341        }
 342}
 343
 344/*
 345 * fixup_activate is called when:
 346 * - an active object is activated
 347 * - an unknown object is activated (might be a statically initialized object)
 348 */
 349static int work_fixup_activate(void *addr, enum debug_obj_state state)
 350{
 351        struct work_struct *work = addr;
 352
 353        switch (state) {
 354
 355        case ODEBUG_STATE_NOTAVAILABLE:
 356                /*
 357                 * This is not really a fixup. The work struct was
 358                 * statically initialized. We just make sure that it
 359                 * is tracked in the object tracker.
 360                 */
 361                if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
 362                        debug_object_init(work, &work_debug_descr);
 363                        debug_object_activate(work, &work_debug_descr);
 364                        return 0;
 365                }
 366                WARN_ON_ONCE(1);
 367                return 0;
 368
 369        case ODEBUG_STATE_ACTIVE:
 370                WARN_ON(1);
 371
 372        default:
 373                return 0;
 374        }
 375}
 376
 377/*
 378 * fixup_free is called when:
 379 * - an active object is freed
 380 */
 381static int work_fixup_free(void *addr, enum debug_obj_state state)
 382{
 383        struct work_struct *work = addr;
 384
 385        switch (state) {
 386        case ODEBUG_STATE_ACTIVE:
 387                cancel_work_sync(work);
 388                debug_object_free(work, &work_debug_descr);
 389                return 1;
 390        default:
 391                return 0;
 392        }
 393}
 394
 395static struct debug_obj_descr work_debug_descr = {
 396        .name           = "work_struct",
 397        .debug_hint     = work_debug_hint,
 398        .fixup_init     = work_fixup_init,
 399        .fixup_activate = work_fixup_activate,
 400        .fixup_free     = work_fixup_free,
 401};
 402
 403static inline void debug_work_activate(struct work_struct *work)
 404{
 405        debug_object_activate(work, &work_debug_descr);
 406}
 407
 408static inline void debug_work_deactivate(struct work_struct *work)
 409{
 410        debug_object_deactivate(work, &work_debug_descr);
 411}
 412
 413void __init_work(struct work_struct *work, int onstack)
 414{
 415        if (onstack)
 416                debug_object_init_on_stack(work, &work_debug_descr);
 417        else
 418                debug_object_init(work, &work_debug_descr);
 419}
 420EXPORT_SYMBOL_GPL(__init_work);
 421
 422void destroy_work_on_stack(struct work_struct *work)
 423{
 424        debug_object_free(work, &work_debug_descr);
 425}
 426EXPORT_SYMBOL_GPL(destroy_work_on_stack);
 427
 428#else
 429static inline void debug_work_activate(struct work_struct *work) { }
 430static inline void debug_work_deactivate(struct work_struct *work) { }
 431#endif
 432
 433/* Serializes the accesses to the list of workqueues. */
 434static DEFINE_SPINLOCK(workqueue_lock);
 435static LIST_HEAD(workqueues);
 436static bool workqueue_freezing;         /* W: have wqs started freezing? */
 437
 438/*
 439 * The almighty global cpu workqueues.  nr_running is the only field
 440 * which is expected to be used frequently by other cpus via
 441 * try_to_wake_up().  Put it in a separate cacheline.
 442 */
 443static DEFINE_PER_CPU(struct global_cwq, global_cwq);
 444static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, gcwq_nr_running);
 445
 446/*
 447 * Global cpu workqueue and nr_running counter for unbound gcwq.  The
 448 * gcwq is always online, has GCWQ_DISASSOCIATED set, and all its
 449 * workers have WORKER_UNBOUND set.
 450 */
 451static struct global_cwq unbound_global_cwq;
 452static atomic_t unbound_gcwq_nr_running = ATOMIC_INIT(0);       /* always 0 */
 453
 454static int worker_thread(void *__worker);
 455
 456static struct global_cwq *get_gcwq(unsigned int cpu)
 457{
 458        if (cpu != WORK_CPU_UNBOUND)
 459                return &per_cpu(global_cwq, cpu);
 460        else
 461                return &unbound_global_cwq;
 462}
 463
 464static atomic_t *get_gcwq_nr_running(unsigned int cpu)
 465{
 466        if (cpu != WORK_CPU_UNBOUND)
 467                return &per_cpu(gcwq_nr_running, cpu);
 468        else
 469                return &unbound_gcwq_nr_running;
 470}
 471
 472static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
 473                                            struct workqueue_struct *wq)
 474{
 475        if (!(wq->flags & WQ_UNBOUND)) {
 476                if (likely(cpu < nr_cpu_ids)) {
 477#ifdef CONFIG_SMP
 478                        return per_cpu_ptr(wq->cpu_wq.pcpu, cpu);
 479#else
 480                        return wq->cpu_wq.single;
 481#endif
 482                }
 483        } else if (likely(cpu == WORK_CPU_UNBOUND))
 484                return wq->cpu_wq.single;
 485        return NULL;
 486}
 487
 488static unsigned int work_color_to_flags(int color)
 489{
 490        return color << WORK_STRUCT_COLOR_SHIFT;
 491}
 492
 493static int get_work_color(struct work_struct *work)
 494{
 495        return (*work_data_bits(work) >> WORK_STRUCT_COLOR_SHIFT) &
 496                ((1 << WORK_STRUCT_COLOR_BITS) - 1);
 497}
 498
 499static int work_next_color(int color)
 500{
 501        return (color + 1) % WORK_NR_COLORS;
 502}
 503
 504/*
 505 * A work's data points to the cwq with WORK_STRUCT_CWQ set while the
 506 * work is on queue.  Once execution starts, WORK_STRUCT_CWQ is
 507 * cleared and the work data contains the cpu number it was last on.
 508 *
 509 * set_work_{cwq|cpu}() and clear_work_data() can be used to set the
 510 * cwq, cpu or clear work->data.  These functions should only be
 511 * called while the work is owned - ie. while the PENDING bit is set.
 512 *
 513 * get_work_[g]cwq() can be used to obtain the gcwq or cwq
 514 * corresponding to a work.  gcwq is available once the work has been
 515 * queued anywhere after initialization.  cwq is available only from
 516 * queueing until execution starts.
 517 */
 518static inline void set_work_data(struct work_struct *work, unsigned long data,
 519                                 unsigned long flags)
 520{
 521        BUG_ON(!work_pending(work));
 522        atomic_long_set(&work->data, data | flags | work_static(work));
 523}
 524
 525static void set_work_cwq(struct work_struct *work,
 526                         struct cpu_workqueue_struct *cwq,
 527                         unsigned long extra_flags)
 528{
 529        set_work_data(work, (unsigned long)cwq,
 530                      WORK_STRUCT_PENDING | WORK_STRUCT_CWQ | extra_flags);
 531}
 532
 533static void set_work_cpu(struct work_struct *work, unsigned int cpu)
 534{
 535        set_work_data(work, cpu << WORK_STRUCT_FLAG_BITS, WORK_STRUCT_PENDING);
 536}
 537
 538static void clear_work_data(struct work_struct *work)
 539{
 540        set_work_data(work, WORK_STRUCT_NO_CPU, 0);
 541}
 542
 543static struct cpu_workqueue_struct *get_work_cwq(struct work_struct *work)
 544{
 545        unsigned long data = atomic_long_read(&work->data);
 546
 547        if (data & WORK_STRUCT_CWQ)
 548                return (void *)(data & WORK_STRUCT_WQ_DATA_MASK);
 549        else
 550                return NULL;
 551}
 552
 553static struct global_cwq *get_work_gcwq(struct work_struct *work)
 554{
 555        unsigned long data = atomic_long_read(&work->data);
 556        unsigned int cpu;
 557
 558        if (data & WORK_STRUCT_CWQ)
 559                return ((struct cpu_workqueue_struct *)
 560                        (data & WORK_STRUCT_WQ_DATA_MASK))->gcwq;
 561
 562        cpu = data >> WORK_STRUCT_FLAG_BITS;
 563        if (cpu == WORK_CPU_NONE)
 564                return NULL;
 565
 566        BUG_ON(cpu >= nr_cpu_ids && cpu != WORK_CPU_UNBOUND);
 567        return get_gcwq(cpu);
 568}
 569
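/*
 * To make the encoding above concrete: while a work is queued, its data
 * word holds the cwq pointer (aligned so that the low
 * WORK_STRUCT_FLAG_BITS are free for flags) with WORK_STRUCT_CWQ set;
 * once execution starts, the same word holds the last cpu number
 * shifted left by WORK_STRUCT_FLAG_BITS with WORK_STRUCT_CWQ clear.
 * That is why get_work_cwq() masks with WORK_STRUCT_WQ_DATA_MASK in the
 * first case and get_work_gcwq() shifts right by WORK_STRUCT_FLAG_BITS
 * in the second.
 */
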
 570/*
 571 * Policy functions.  These define the policies on how the global
 572 * worker pool is managed.  Unless noted otherwise, these functions
 573 * assume that they're being called with gcwq->lock held.
 574 */
 575
 576static bool __need_more_worker(struct global_cwq *gcwq)
 577{
 578        return !atomic_read(get_gcwq_nr_running(gcwq->cpu)) ||
 579                gcwq->flags & GCWQ_HIGHPRI_PENDING;
 580}
 581
 582/*
 583 * Need to wake up a worker?  Called from anything but currently
 584 * running workers.
 585 */
 586static bool need_more_worker(struct global_cwq *gcwq)
 587{
 588        return !list_empty(&gcwq->worklist) && __need_more_worker(gcwq);
 589}
 590
 591/* Can I start working?  Called from busy but !running workers. */
 592static bool may_start_working(struct global_cwq *gcwq)
 593{
 594        return gcwq->nr_idle;
 595}
 596
 597/* Do I need to keep working?  Called from currently running workers. */
 598static bool keep_working(struct global_cwq *gcwq)
 599{
 600        atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
 601
 602        return !list_empty(&gcwq->worklist) &&
 603                (atomic_read(nr_running) <= 1 ||
 604                 gcwq->flags & GCWQ_HIGHPRI_PENDING);
 605}
 606
 607/* Do we need a new worker?  Called from manager. */
 608static bool need_to_create_worker(struct global_cwq *gcwq)
 609{
 610        return need_more_worker(gcwq) && !may_start_working(gcwq);
 611}
 612
 613/* Do I need to be the manager? */
 614static bool need_to_manage_workers(struct global_cwq *gcwq)
 615{
 616        return need_to_create_worker(gcwq) || gcwq->flags & GCWQ_MANAGE_WORKERS;
 617}
 618
 619/* Do we have too many workers and should some go away? */
 620static bool too_many_workers(struct global_cwq *gcwq)
 621{
 622        bool managing = gcwq->flags & GCWQ_MANAGING_WORKERS;
 623        int nr_idle = gcwq->nr_idle + managing; /* manager is considered idle */
 624        int nr_busy = gcwq->nr_workers - nr_idle;
 625
 626        return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
 627}
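
/*
 * For example, with MAX_IDLE_WORKERS_RATIO == 4 and 16 busy workers,
 * too_many_workers() stays false while up to five workers are idle and
 * flips to true at the sixth: (6 - 2) * 4 == 16 >= 16.  In other words,
 * two idle workers are always tolerated and beyond that roughly one
 * idle worker is kept per four busy ones.
 */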
 628
 629/*
 630 * Wake up functions.
 631 */
 632
 633/* Return the first worker.  Safe with preemption disabled */
 634static struct worker *first_worker(struct global_cwq *gcwq)
 635{
 636        if (unlikely(list_empty(&gcwq->idle_list)))
 637                return NULL;
 638
 639        return list_first_entry(&gcwq->idle_list, struct worker, entry);
 640}
 641
 642/**
 643 * wake_up_worker - wake up an idle worker
 644 * @gcwq: gcwq to wake worker for
 645 *
 646 * Wake up the first idle worker of @gcwq.
 647 *
 648 * CONTEXT:
 649 * spin_lock_irq(gcwq->lock).
 650 */
 651static void wake_up_worker(struct global_cwq *gcwq)
 652{
 653        struct worker *worker = first_worker(gcwq);
 654
 655        if (likely(worker))
 656                wake_up_process(worker->task);
 657}
 658
 659/**
 660 * wq_worker_waking_up - a worker is waking up
 661 * @task: task waking up
 662 * @cpu: CPU @task is waking up to
 663 *
 664 * This function is called during try_to_wake_up() when a worker is
 665 * being awoken.
 666 *
 667 * CONTEXT:
 668 * spin_lock_irq(rq->lock)
 669 */
 670void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
 671{
 672        struct worker *worker = kthread_data(task);
 673
 674        if (!(worker->flags & WORKER_NOT_RUNNING))
 675                atomic_inc(get_gcwq_nr_running(cpu));
 676}
 677
 678/**
 679 * wq_worker_sleeping - a worker is going to sleep
 680 * @task: task going to sleep
 681 * @cpu: CPU in question, must be the current CPU number
 682 *
 683 * This function is called during schedule() when a busy worker is
  684 * going to sleep.  A worker on the same cpu can be woken up by
  685 * returning a pointer to its task.
 686 *
 687 * CONTEXT:
 688 * spin_lock_irq(rq->lock)
 689 *
 690 * RETURNS:
 691 * Worker task on @cpu to wake up, %NULL if none.
 692 */
 693struct task_struct *wq_worker_sleeping(struct task_struct *task,
 694                                       unsigned int cpu)
 695{
 696        struct worker *worker = kthread_data(task), *to_wakeup = NULL;
 697        struct global_cwq *gcwq = get_gcwq(cpu);
 698        atomic_t *nr_running = get_gcwq_nr_running(cpu);
 699
 700        if (worker->flags & WORKER_NOT_RUNNING)
 701                return NULL;
 702
 703        /* this can only happen on the local cpu */
 704        BUG_ON(cpu != raw_smp_processor_id());
 705
 706        /*
 707         * The counterpart of the following dec_and_test, implied mb,
 708         * worklist not empty test sequence is in insert_work().
 709         * Please read comment there.
 710         *
 711         * NOT_RUNNING is clear.  This means that trustee is not in
 712         * charge and we're running on the local cpu w/ rq lock held
  713         * and preemption disabled, which in turn means that no one else
 714         * could be manipulating idle_list, so dereferencing idle_list
 715         * without gcwq lock is safe.
 716         */
 717        if (atomic_dec_and_test(nr_running) && !list_empty(&gcwq->worklist))
 718                to_wakeup = first_worker(gcwq);
 719        return to_wakeup ? to_wakeup->task : NULL;
 720}
 721
 722/**
 723 * worker_set_flags - set worker flags and adjust nr_running accordingly
 724 * @worker: self
 725 * @flags: flags to set
 726 * @wakeup: wakeup an idle worker if necessary
 727 *
 728 * Set @flags in @worker->flags and adjust nr_running accordingly.  If
 729 * nr_running becomes zero and @wakeup is %true, an idle worker is
 730 * woken up.
 731 *
 732 * CONTEXT:
 733 * spin_lock_irq(gcwq->lock)
 734 */
 735static inline void worker_set_flags(struct worker *worker, unsigned int flags,
 736                                    bool wakeup)
 737{
 738        struct global_cwq *gcwq = worker->gcwq;
 739
 740        WARN_ON_ONCE(worker->task != current);
 741
 742        /*
 743         * If transitioning into NOT_RUNNING, adjust nr_running and
 744         * wake up an idle worker as necessary if requested by
 745         * @wakeup.
 746         */
 747        if ((flags & WORKER_NOT_RUNNING) &&
 748            !(worker->flags & WORKER_NOT_RUNNING)) {
 749                atomic_t *nr_running = get_gcwq_nr_running(gcwq->cpu);
 750
 751                if (wakeup) {
 752                        if (atomic_dec_and_test(nr_running) &&
 753                            !list_empty(&gcwq->worklist))
 754                                wake_up_worker(gcwq);
 755                } else
 756                        atomic_dec(nr_running);
 757        }
 758
 759        worker->flags |= flags;
 760}
 761
 762/**
 763 * worker_clr_flags - clear worker flags and adjust nr_running accordingly
 764 * @worker: self
 765 * @flags: flags to clear
 766 *
 767 * Clear @flags in @worker->flags and adjust nr_running accordingly.
 768 *
 769 * CONTEXT:
 770 * spin_lock_irq(gcwq->lock)
 771 */
 772static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
 773{
 774        struct global_cwq *gcwq = worker->gcwq;
 775        unsigned int oflags = worker->flags;
 776
 777        WARN_ON_ONCE(worker->task != current);
 778
 779        worker->flags &= ~flags;
 780
 781        /*
 782         * If transitioning out of NOT_RUNNING, increment nr_running.  Note
  783         * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is a mask
 784         * of multiple flags, not a single flag.
 785         */
 786        if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
 787                if (!(worker->flags & WORKER_NOT_RUNNING))
 788                        atomic_inc(get_gcwq_nr_running(gcwq->cpu));
 789}
 790
 791/**
 792 * busy_worker_head - return the busy hash head for a work
 793 * @gcwq: gcwq of interest
 794 * @work: work to be hashed
 795 *
 796 * Return hash head of @gcwq for @work.
 797 *
 798 * CONTEXT:
 799 * spin_lock_irq(gcwq->lock).
 800 *
 801 * RETURNS:
 802 * Pointer to the hash head.
 803 */
 804static struct hlist_head *busy_worker_head(struct global_cwq *gcwq,
 805                                           struct work_struct *work)
 806{
 807        const int base_shift = ilog2(sizeof(struct work_struct));
 808        unsigned long v = (unsigned long)work;
 809
 810        /* simple shift and fold hash, do we need something better? */
 811        v >>= base_shift;
 812        v += v >> BUSY_WORKER_HASH_ORDER;
 813        v &= BUSY_WORKER_HASH_MASK;
 814
 815        return &gcwq->busy_hash[v];
 816}
 817
 818/**
 819 * __find_worker_executing_work - find worker which is executing a work
 820 * @gcwq: gcwq of interest
 821 * @bwh: hash head as returned by busy_worker_head()
 822 * @work: work to find worker for
 823 *
 824 * Find a worker which is executing @work on @gcwq.  @bwh should be
 825 * the hash head obtained by calling busy_worker_head() with the same
 826 * work.
 827 *
 828 * CONTEXT:
 829 * spin_lock_irq(gcwq->lock).
 830 *
 831 * RETURNS:
 832 * Pointer to worker which is executing @work if found, NULL
 833 * otherwise.
 834 */
 835static struct worker *__find_worker_executing_work(struct global_cwq *gcwq,
 836                                                   struct hlist_head *bwh,
 837                                                   struct work_struct *work)
 838{
 839        struct worker *worker;
 840        struct hlist_node *tmp;
 841
 842        hlist_for_each_entry(worker, tmp, bwh, hentry)
 843                if (worker->current_work == work)
 844                        return worker;
 845        return NULL;
 846}
 847
 848/**
 849 * find_worker_executing_work - find worker which is executing a work
 850 * @gcwq: gcwq of interest
 851 * @work: work to find worker for
 852 *
 853 * Find a worker which is executing @work on @gcwq.  This function is
 854 * identical to __find_worker_executing_work() except that this
 855 * function calculates @bwh itself.
 856 *
 857 * CONTEXT:
 858 * spin_lock_irq(gcwq->lock).
 859 *
 860 * RETURNS:
 861 * Pointer to worker which is executing @work if found, NULL
 862 * otherwise.
 863 */
 864static struct worker *find_worker_executing_work(struct global_cwq *gcwq,
 865                                                 struct work_struct *work)
 866{
 867        return __find_worker_executing_work(gcwq, busy_worker_head(gcwq, work),
 868                                            work);
 869}
 870
 871/**
 872 * gcwq_determine_ins_pos - find insertion position
 873 * @gcwq: gcwq of interest
 874 * @cwq: cwq a work is being queued for
 875 *
 876 * A work for @cwq is about to be queued on @gcwq, determine insertion
 877 * position for the work.  If @cwq is for HIGHPRI wq, the work is
 878 * queued at the head of the queue but in FIFO order with respect to
 879 * other HIGHPRI works; otherwise, at the end of the queue.  This
 880 * function also sets GCWQ_HIGHPRI_PENDING flag to hint @gcwq that
 881 * there are HIGHPRI works pending.
 882 *
 883 * CONTEXT:
 884 * spin_lock_irq(gcwq->lock).
 885 *
 886 * RETURNS:
  887 * Pointer to insertion position.
 888 */
 889static inline struct list_head *gcwq_determine_ins_pos(struct global_cwq *gcwq,
 890                                               struct cpu_workqueue_struct *cwq)
 891{
 892        struct work_struct *twork;
 893
 894        if (likely(!(cwq->wq->flags & WQ_HIGHPRI)))
 895                return &gcwq->worklist;
 896
 897        list_for_each_entry(twork, &gcwq->worklist, entry) {
 898                struct cpu_workqueue_struct *tcwq = get_work_cwq(twork);
 899
 900                if (!(tcwq->wq->flags & WQ_HIGHPRI))
 901                        break;
 902        }
 903
 904        gcwq->flags |= GCWQ_HIGHPRI_PENDING;
 905        return &twork->entry;
 906}
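
/*
 * For example, if gcwq->worklist currently holds [H1, H2, N1, N2], where
 * the H entries came from WQ_HIGHPRI workqueues and the N entries did
 * not, a new HIGHPRI work is returned the position in front of N1 and
 * ends up as [H1, H2, H3, N1, N2], while a normal work is simply
 * appended at the tail.
 */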
 907
 908/**
 909 * insert_work - insert a work into gcwq
 910 * @cwq: cwq @work belongs to
 911 * @work: work to insert
 912 * @head: insertion point
 913 * @extra_flags: extra WORK_STRUCT_* flags to set
 914 *
 915 * Insert @work which belongs to @cwq into @gcwq after @head.
 916 * @extra_flags is or'd to work_struct flags.
 917 *
 918 * CONTEXT:
 919 * spin_lock_irq(gcwq->lock).
 920 */
 921static void insert_work(struct cpu_workqueue_struct *cwq,
 922                        struct work_struct *work, struct list_head *head,
 923                        unsigned int extra_flags)
 924{
 925        struct global_cwq *gcwq = cwq->gcwq;
 926
 927        /* we own @work, set data and link */
 928        set_work_cwq(work, cwq, extra_flags);
 929
 930        /*
 931         * Ensure that we get the right work->data if we see the
 932         * result of list_add() below, see try_to_grab_pending().
 933         */
 934        smp_wmb();
 935
 936        list_add_tail(&work->entry, head);
 937
 938        /*
 939         * Ensure either worker_sched_deactivated() sees the above
 940         * list_add_tail() or we see zero nr_running to avoid workers
 941         * lying around lazily while there are works to be processed.
 942         */
 943        smp_mb();
 944
 945        if (__need_more_worker(gcwq))
 946                wake_up_worker(gcwq);
 947}
 948
 949/*
 950 * Test whether @work is being queued from another work executing on the
 951 * same workqueue.  This is rather expensive and should only be used from
 952 * cold paths.
 953 */
 954static bool is_chained_work(struct workqueue_struct *wq)
 955{
 956        unsigned long flags;
 957        unsigned int cpu;
 958
 959        for_each_gcwq_cpu(cpu) {
 960                struct global_cwq *gcwq = get_gcwq(cpu);
 961                struct worker *worker;
 962                struct hlist_node *pos;
 963                int i;
 964
 965                spin_lock_irqsave(&gcwq->lock, flags);
 966                for_each_busy_worker(worker, i, pos, gcwq) {
 967                        if (worker->task != current)
 968                                continue;
 969                        spin_unlock_irqrestore(&gcwq->lock, flags);
 970                        /*
 971                         * I'm @worker, no locking necessary.  See if @work
 972                         * is headed to the same workqueue.
 973                         */
 974                        return worker->current_cwq->wq == wq;
 975                }
 976                spin_unlock_irqrestore(&gcwq->lock, flags);
 977        }
 978        return false;
 979}
 980
 981static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 982                         struct work_struct *work)
 983{
 984        struct global_cwq *gcwq;
 985        struct cpu_workqueue_struct *cwq;
 986        struct list_head *worklist;
 987        unsigned int work_flags;
 988        unsigned long flags;
 989
 990        debug_work_activate(work);
 991
 992        /* if dying, only works from the same workqueue are allowed */
 993        if (unlikely(wq->flags & WQ_DYING) &&
 994            WARN_ON_ONCE(!is_chained_work(wq)))
 995                return;
 996
 997        /* determine gcwq to use */
 998        if (!(wq->flags & WQ_UNBOUND)) {
 999                struct global_cwq *last_gcwq;
1000
1001                if (unlikely(cpu == WORK_CPU_UNBOUND))
1002                        cpu = raw_smp_processor_id();
1003
1004                /*
1005                 * It's multi cpu.  If @wq is non-reentrant and @work
1006                 * was previously on a different cpu, it might still
1007                 * be running there, in which case the work needs to
1008                 * be queued on that cpu to guarantee non-reentrance.
1009                 */
1010                gcwq = get_gcwq(cpu);
1011                if (wq->flags & WQ_NON_REENTRANT &&
1012                    (last_gcwq = get_work_gcwq(work)) && last_gcwq != gcwq) {
1013                        struct worker *worker;
1014
1015                        spin_lock_irqsave(&last_gcwq->lock, flags);
1016
1017                        worker = find_worker_executing_work(last_gcwq, work);
1018
1019                        if (worker && worker->current_cwq->wq == wq)
1020                                gcwq = last_gcwq;
1021                        else {
1022                                /* meh... not running there, queue here */
1023                                spin_unlock_irqrestore(&last_gcwq->lock, flags);
1024                                spin_lock_irqsave(&gcwq->lock, flags);
1025                        }
1026                } else
1027                        spin_lock_irqsave(&gcwq->lock, flags);
1028        } else {
1029                gcwq = get_gcwq(WORK_CPU_UNBOUND);
1030                spin_lock_irqsave(&gcwq->lock, flags);
1031        }
1032
1033        /* gcwq determined, get cwq and queue */
1034        cwq = get_cwq(gcwq->cpu, wq);
1035        trace_workqueue_queue_work(cpu, cwq, work);
1036
1037        BUG_ON(!list_empty(&work->entry));
1038
1039        cwq->nr_in_flight[cwq->work_color]++;
1040        work_flags = work_color_to_flags(cwq->work_color);
1041
1042        if (likely(cwq->nr_active < cwq->max_active)) {
1043                trace_workqueue_activate_work(work);
1044                cwq->nr_active++;
1045                worklist = gcwq_determine_ins_pos(gcwq, cwq);
1046        } else {
1047                work_flags |= WORK_STRUCT_DELAYED;
1048                worklist = &cwq->delayed_works;
1049        }
1050
1051        insert_work(cwq, work, worklist, work_flags);
1052
1053        spin_unlock_irqrestore(&gcwq->lock, flags);
1054}
1055
1056/**
1057 * queue_work - queue work on a workqueue
1058 * @wq: workqueue to use
1059 * @work: work to queue
1060 *
1061 * Returns 0 if @work was already on a queue, non-zero otherwise.
1062 *
1063 * We queue the work to the CPU on which it was submitted, but if the CPU dies
1064 * it can be processed by another CPU.
1065 */
1066int queue_work(struct workqueue_struct *wq, struct work_struct *work)
1067{
1068        int ret;
1069
1070        ret = queue_work_on(get_cpu(), wq, work);
1071        put_cpu();
1072
1073        return ret;
1074}
1075EXPORT_SYMBOL_GPL(queue_work);
1076
1077/**
1078 * queue_work_on - queue work on specific cpu
1079 * @cpu: CPU number to execute work on
1080 * @wq: workqueue to use
1081 * @work: work to queue
1082 *
1083 * Returns 0 if @work was already on a queue, non-zero otherwise.
1084 *
 1085 * We queue the work to a specific CPU; the caller must ensure it
1086 * can't go away.
1087 */
1088int
1089queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
1090{
1091        int ret = 0;
1092
1093        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1094                __queue_work(cpu, wq, work);
1095                ret = 1;
1096        }
1097        return ret;
1098}
1099EXPORT_SYMBOL_GPL(queue_work_on);
1100
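/*
 * A sketch of targeting a specific CPU, with illustrative names.  Holding
 * get_online_cpus() across the check and the queueing is one way to meet
 * the "the caller must ensure it can't go away" requirement above; if the
 * requested CPU is not online we simply fall back to queue_work().
 */
static void __maybe_unused example_queue_on_cpu(struct workqueue_struct *wq,
                                                struct work_struct *work,
                                                int cpu)
{
        get_online_cpus();
        if (cpu_online(cpu))
                queue_work_on(cpu, wq, work);
        else
                queue_work(wq, work);
        put_online_cpus();
}
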
1101static void delayed_work_timer_fn(unsigned long __data)
1102{
1103        struct delayed_work *dwork = (struct delayed_work *)__data;
1104        struct cpu_workqueue_struct *cwq = get_work_cwq(&dwork->work);
1105
1106        __queue_work(smp_processor_id(), cwq->wq, &dwork->work);
1107}
1108
1109/**
1110 * queue_delayed_work - queue work on a workqueue after delay
1111 * @wq: workqueue to use
1112 * @dwork: delayable work to queue
1113 * @delay: number of jiffies to wait before queueing
1114 *
1115 * Returns 0 if @work was already on a queue, non-zero otherwise.
1116 */
1117int queue_delayed_work(struct workqueue_struct *wq,
1118                        struct delayed_work *dwork, unsigned long delay)
1119{
1120        if (delay == 0)
1121                return queue_work(wq, &dwork->work);
1122
1123        return queue_delayed_work_on(-1, wq, dwork, delay);
1124}
1125EXPORT_SYMBOL_GPL(queue_delayed_work);
1126
1127/**
1128 * queue_delayed_work_on - queue work on specific CPU after delay
1129 * @cpu: CPU number to execute work on
1130 * @wq: workqueue to use
1131 * @dwork: work to queue
1132 * @delay: number of jiffies to wait before queueing
1133 *
1134 * Returns 0 if @work was already on a queue, non-zero otherwise.
1135 */
1136int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
1137                        struct delayed_work *dwork, unsigned long delay)
1138{
1139        int ret = 0;
1140        struct timer_list *timer = &dwork->timer;
1141        struct work_struct *work = &dwork->work;
1142
1143        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
1144                unsigned int lcpu;
1145
1146                BUG_ON(timer_pending(timer));
1147                BUG_ON(!list_empty(&work->entry));
1148
1149                timer_stats_timer_set_start_info(&dwork->timer);
1150
1151                /*
1152                 * This stores cwq for the moment, for the timer_fn.
1153                 * Note that the work's gcwq is preserved to allow
1154                 * reentrance detection for delayed works.
1155                 */
1156                if (!(wq->flags & WQ_UNBOUND)) {
1157                        struct global_cwq *gcwq = get_work_gcwq(work);
1158
1159                        if (gcwq && gcwq->cpu != WORK_CPU_UNBOUND)
1160                                lcpu = gcwq->cpu;
1161                        else
1162                                lcpu = raw_smp_processor_id();
1163                } else
1164                        lcpu = WORK_CPU_UNBOUND;
1165
1166                set_work_cwq(work, get_cwq(lcpu, wq), 0);
1167
1168                timer->expires = jiffies + delay;
1169                timer->data = (unsigned long)dwork;
1170                timer->function = delayed_work_timer_fn;
1171
1172                if (unlikely(cpu >= 0))
1173                        add_timer_on(timer, cpu);
1174                else
1175                        add_timer(timer);
1176                ret = 1;
1177        }
1178        return ret;
1179}
1180EXPORT_SYMBOL_GPL(queue_delayed_work_on);
1181
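/*
 * A minimal usage sketch for the delayed work interface above; the item
 * and handler names are illustrative.  DECLARE_DELAYED_WORK() and
 * to_delayed_work() come from linux/workqueue.h.
 */
static void example_dwork_fn(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);

        pr_debug("delayed work %p fired\n", dwork);
}

static DECLARE_DELAYED_WORK(example_dwork, example_dwork_fn);

static void __maybe_unused example_queue_after_a_second(struct workqueue_struct *wq)
{
        /* run example_dwork_fn() on @wq roughly one second from now */
        queue_delayed_work(wq, &example_dwork, HZ);
}
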
1182/**
1183 * worker_enter_idle - enter idle state
1184 * @worker: worker which is entering idle state
1185 *
1186 * @worker is entering idle state.  Update stats and idle timer if
1187 * necessary.
1188 *
1189 * LOCKING:
1190 * spin_lock_irq(gcwq->lock).
1191 */
1192static void worker_enter_idle(struct worker *worker)
1193{
1194        struct global_cwq *gcwq = worker->gcwq;
1195
1196        BUG_ON(worker->flags & WORKER_IDLE);
1197        BUG_ON(!list_empty(&worker->entry) &&
1198               (worker->hentry.next || worker->hentry.pprev));
1199
1200        /* can't use worker_set_flags(), also called from start_worker() */
1201        worker->flags |= WORKER_IDLE;
1202        gcwq->nr_idle++;
1203        worker->last_active = jiffies;
1204
1205        /* idle_list is LIFO */
1206        list_add(&worker->entry, &gcwq->idle_list);
1207
1208        if (likely(!(worker->flags & WORKER_ROGUE))) {
1209                if (too_many_workers(gcwq) && !timer_pending(&gcwq->idle_timer))
1210                        mod_timer(&gcwq->idle_timer,
1211                                  jiffies + IDLE_WORKER_TIMEOUT);
1212        } else
1213                wake_up_all(&gcwq->trustee_wait);
1214
1215        /* sanity check nr_running */
1216        WARN_ON_ONCE(gcwq->nr_workers == gcwq->nr_idle &&
1217                     atomic_read(get_gcwq_nr_running(gcwq->cpu)));
1218}
1219
1220/**
1221 * worker_leave_idle - leave idle state
1222 * @worker: worker which is leaving idle state
1223 *
1224 * @worker is leaving idle state.  Update stats.
1225 *
1226 * LOCKING:
1227 * spin_lock_irq(gcwq->lock).
1228 */
1229static void worker_leave_idle(struct worker *worker)
1230{
1231        struct global_cwq *gcwq = worker->gcwq;
1232
1233        BUG_ON(!(worker->flags & WORKER_IDLE));
1234        worker_clr_flags(worker, WORKER_IDLE);
1235        gcwq->nr_idle--;
1236        list_del_init(&worker->entry);
1237}
1238
1239/**
1240 * worker_maybe_bind_and_lock - bind worker to its cpu if possible and lock gcwq
1241 * @worker: self
1242 *
1243 * Works which are scheduled while the cpu is online must at least be
1244 * scheduled to a worker which is bound to the cpu so that if they are
1245 * flushed from cpu callbacks while cpu is going down, they are
1246 * guaranteed to execute on the cpu.
1247 *
1248 * This function is to be used by rogue workers and rescuers to bind
1249 * themselves to the target cpu and may race with cpu going down or
1250 * coming online.  kthread_bind() can't be used because it may put the
 1251 * worker on an already dead cpu and set_cpus_allowed_ptr() can't be used
1252 * verbatim as it's best effort and blocking and gcwq may be
1253 * [dis]associated in the meantime.
1254 *
1255 * This function tries set_cpus_allowed() and locks gcwq and verifies
1256 * the binding against GCWQ_DISASSOCIATED which is set during
1257 * CPU_DYING and cleared during CPU_ONLINE, so if the worker enters
1258 * idle state or fetches works without dropping lock, it can guarantee
1259 * the scheduling requirement described in the first paragraph.
1260 *
1261 * CONTEXT:
1262 * Might sleep.  Called without any lock but returns with gcwq->lock
1263 * held.
1264 *
1265 * RETURNS:
1266 * %true if the associated gcwq is online (@worker is successfully
1267 * bound), %false if offline.
1268 */
1269static bool worker_maybe_bind_and_lock(struct worker *worker)
1270__acquires(&gcwq->lock)
1271{
1272        struct global_cwq *gcwq = worker->gcwq;
1273        struct task_struct *task = worker->task;
1274
1275        while (true) {
1276                /*
1277                 * The following call may fail, succeed or succeed
1278                 * without actually migrating the task to the cpu if
1279                 * it races with cpu hotunplug operation.  Verify
1280                 * against GCWQ_DISASSOCIATED.
1281                 */
1282                if (!(gcwq->flags & GCWQ_DISASSOCIATED))
1283                        set_cpus_allowed_ptr(task, get_cpu_mask(gcwq->cpu));
1284
1285                spin_lock_irq(&gcwq->lock);
1286                if (gcwq->flags & GCWQ_DISASSOCIATED)
1287                        return false;
1288                if (task_cpu(task) == gcwq->cpu &&
1289                    cpumask_equal(&current->cpus_allowed,
1290                                  get_cpu_mask(gcwq->cpu)))
1291                        return true;
1292                spin_unlock_irq(&gcwq->lock);
1293
1294                /*
1295                 * We've raced with CPU hot[un]plug.  Give it a breather
1296                 * and retry migration.  cond_resched() is required here;
1297                 * otherwise, we might deadlock against cpu_stop trying to
1298                 * bring down the CPU on non-preemptive kernel.
1299                 */
1300                cpu_relax();
1301                cond_resched();
1302        }
1303}
1304
1305/*
1306 * Function for worker->rebind_work used to rebind rogue busy workers
1307 * to the associated cpu which is coming back online.  This is
1308 * scheduled by cpu up but can race with other cpu hotplug operations
1309 * and may be executed twice without intervening cpu down.
1310 */
1311static void worker_rebind_fn(struct work_struct *work)
1312{
1313        struct worker *worker = container_of(work, struct worker, rebind_work);
1314        struct global_cwq *gcwq = worker->gcwq;
1315
1316        if (worker_maybe_bind_and_lock(worker))
1317                worker_clr_flags(worker, WORKER_REBIND);
1318
1319        spin_unlock_irq(&gcwq->lock);
1320}
1321
1322static struct worker *alloc_worker(void)
1323{
1324        struct worker *worker;
1325
1326        worker = kzalloc(sizeof(*worker), GFP_KERNEL);
1327        if (worker) {
1328                INIT_LIST_HEAD(&worker->entry);
1329                INIT_LIST_HEAD(&worker->scheduled);
1330                INIT_WORK(&worker->rebind_work, worker_rebind_fn);
1331                /* on creation a worker is in !idle && prep state */
1332                worker->flags = WORKER_PREP;
1333        }
1334        return worker;
1335}
1336
1337/**
1338 * create_worker - create a new workqueue worker
1339 * @gcwq: gcwq the new worker will belong to
1340 * @bind: whether to set affinity to @cpu or not
1341 *
1342 * Create a new worker which is bound to @gcwq.  The returned worker
1343 * can be started by calling start_worker() or destroyed using
1344 * destroy_worker().
1345 *
1346 * CONTEXT:
1347 * Might sleep.  Does GFP_KERNEL allocations.
1348 *
1349 * RETURNS:
1350 * Pointer to the newly created worker.
1351 */
1352static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
1353{
1354        bool on_unbound_cpu = gcwq->cpu == WORK_CPU_UNBOUND;
1355        struct worker *worker = NULL;
1356        int id = -1;
1357
1358        spin_lock_irq(&gcwq->lock);
1359        while (ida_get_new(&gcwq->worker_ida, &id)) {
1360                spin_unlock_irq(&gcwq->lock);
1361                if (!ida_pre_get(&gcwq->worker_ida, GFP_KERNEL))
1362                        goto fail;
1363                spin_lock_irq(&gcwq->lock);
1364        }
1365        spin_unlock_irq(&gcwq->lock);
1366
1367        worker = alloc_worker();
1368        if (!worker)
1369                goto fail;
1370
1371        worker->gcwq = gcwq;
1372        worker->id = id;
1373
1374        if (!on_unbound_cpu)
1375                worker->task = kthread_create_on_node(worker_thread,
1376                                                      worker,
1377                                                      cpu_to_node(gcwq->cpu),
1378                                                      "kworker/%u:%d", gcwq->cpu, id);
1379        else
1380                worker->task = kthread_create(worker_thread, worker,
1381                                              "kworker/u:%d", id);
1382        if (IS_ERR(worker->task))
1383                goto fail;
1384
1385        /*
1386         * A rogue worker will become a regular one if CPU comes
1387         * online later on.  Make sure every worker has
1388         * PF_THREAD_BOUND set.
1389         */
1390        if (bind && !on_unbound_cpu)
1391                kthread_bind(worker->task, gcwq->cpu);
1392        else {
1393                worker->task->flags |= PF_THREAD_BOUND;
1394                if (on_unbound_cpu)
1395                        worker->flags |= WORKER_UNBOUND;
1396        }
1397
1398        return worker;
1399fail:
1400        if (id >= 0) {
1401                spin_lock_irq(&gcwq->lock);
1402                ida_remove(&gcwq->worker_ida, id);
1403                spin_unlock_irq(&gcwq->lock);
1404        }
1405        kfree(worker);
1406        return NULL;
1407}
1408
1409/**
1410 * start_worker - start a newly created worker
1411 * @worker: worker to start
1412 *
1413 * Make the gcwq aware of @worker and start it.
1414 *
1415 * CONTEXT:
1416 * spin_lock_irq(gcwq->lock).
1417 */
1418static void start_worker(struct worker *worker)
1419{
1420        worker->flags |= WORKER_STARTED;
1421        worker->gcwq->nr_workers++;
1422        worker_enter_idle(worker);
1423        wake_up_process(worker->task);
1424}
1425
1426/**
1427 * destroy_worker - destroy a workqueue worker
1428 * @worker: worker to be destroyed
1429 *
1430 * Destroy @worker and adjust @gcwq stats accordingly.
1431 *
1432 * CONTEXT:
1433 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
1434 */
1435static void destroy_worker(struct worker *worker)
1436{
1437        struct global_cwq *gcwq = worker->gcwq;
1438        int id = worker->id;
1439
1440        /* sanity check frenzy */
1441        BUG_ON(worker->current_work);
1442        BUG_ON(!list_empty(&worker->scheduled));
1443
1444        if (worker->flags & WORKER_STARTED)
1445                gcwq->nr_workers--;
1446        if (worker->flags & WORKER_IDLE)
1447                gcwq->nr_idle--;
1448
1449        list_del_init(&worker->entry);
1450        worker->flags |= WORKER_DIE;
1451
1452        spin_unlock_irq(&gcwq->lock);
1453
1454        kthread_stop(worker->task);
1455        kfree(worker);
1456
1457        spin_lock_irq(&gcwq->lock);
1458        ida_remove(&gcwq->worker_ida, id);
1459}
1460
1461static void idle_worker_timeout(unsigned long __gcwq)
1462{
1463        struct global_cwq *gcwq = (void *)__gcwq;
1464
1465        spin_lock_irq(&gcwq->lock);
1466
1467        if (too_many_workers(gcwq)) {
1468                struct worker *worker;
1469                unsigned long expires;
1470
1471                /* idle_list is kept in LIFO order, check the last one */
1472                worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
1473                expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1474
1475                if (time_before(jiffies, expires))
1476                        mod_timer(&gcwq->idle_timer, expires);
1477                else {
1478                        /* it's been idle for too long, wake up manager */
1479                        gcwq->flags |= GCWQ_MANAGE_WORKERS;
1480                        wake_up_worker(gcwq);
1481                }
1482        }
1483
1484        spin_unlock_irq(&gcwq->lock);
1485}
1486
1487static bool send_mayday(struct work_struct *work)
1488{
1489        struct cpu_workqueue_struct *cwq = get_work_cwq(work);
1490        struct workqueue_struct *wq = cwq->wq;
1491        unsigned int cpu;
1492
1493        if (!(wq->flags & WQ_RESCUER))
1494                return false;
1495
1496        /* mayday mayday mayday */
1497        cpu = cwq->gcwq->cpu;
1498        /* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
1499        if (cpu == WORK_CPU_UNBOUND)
1500                cpu = 0;
1501        if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask))
1502                wake_up_process(wq->rescuer->task);
1503        return true;
1504}
1505
1506static void gcwq_mayday_timeout(unsigned long __gcwq)
1507{
1508        struct global_cwq *gcwq = (void *)__gcwq;
1509        struct work_struct *work;
1510
1511        spin_lock_irq(&gcwq->lock);
1512
1513        if (need_to_create_worker(gcwq)) {
1514                /*
1515                 * We've been trying to create a new worker but
1516                 * haven't been successful.  We might be hitting an
1517                 * allocation deadlock.  Send distress signals to
1518                 * rescuers.
1519                 */
1520                list_for_each_entry(work, &gcwq->worklist, entry)
1521                        send_mayday(work);
1522        }
1523
1524        spin_unlock_irq(&gcwq->lock);
1525
1526        mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INTERVAL);
1527}
1528
1529/**
1530 * maybe_create_worker - create a new worker if necessary
1531 * @gcwq: gcwq to create a new worker for
1532 *
1533 * Create a new worker for @gcwq if necessary.  @gcwq is guaranteed to
1534 * have at least one idle worker on return from this function.  If
1535 * creating a new worker takes longer than MAYDAY_INTERVAL, mayday is
1536 * sent to all rescuers with works scheduled on @gcwq to resolve
1537 * possible allocation deadlock.
1538 *
1539 * On return, need_to_create_worker() is guaranteed to be false and
1540 * may_start_working() true.
1541 *
1542 * LOCKING:
1543 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1544 * multiple times.  Does GFP_KERNEL allocations.  Called only from
1545 * manager.
1546 *
1547 * RETURNS:
1548 * false if no action was taken and gcwq->lock stayed locked, true
1549 * otherwise.
1550 */
1551static bool maybe_create_worker(struct global_cwq *gcwq)
1552__releases(&gcwq->lock)
1553__acquires(&gcwq->lock)
1554{
1555        if (!need_to_create_worker(gcwq))
1556                return false;
1557restart:
1558        spin_unlock_irq(&gcwq->lock);
1559
1560        /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
1561        mod_timer(&gcwq->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
1562
1563        while (true) {
1564                struct worker *worker;
1565
1566                worker = create_worker(gcwq, true);
1567                if (worker) {
1568                        del_timer_sync(&gcwq->mayday_timer);
1569                        spin_lock_irq(&gcwq->lock);
1570                        start_worker(worker);
1571                        BUG_ON(need_to_create_worker(gcwq));
1572                        return true;
1573                }
1574
1575                if (!need_to_create_worker(gcwq))
1576                        break;
1577
1578                __set_current_state(TASK_INTERRUPTIBLE);
1579                schedule_timeout(CREATE_COOLDOWN);
1580
1581                if (!need_to_create_worker(gcwq))
1582                        break;
1583        }
1584
1585        del_timer_sync(&gcwq->mayday_timer);
1586        spin_lock_irq(&gcwq->lock);
1587        if (need_to_create_worker(gcwq))
1588                goto restart;
1589        return true;
1590}
1591
1592/**
1593 * maybe_destroy_workers - destroy workers which have been idle for a while
1594 * @gcwq: gcwq to destroy workers for
1595 *
1596 * Destroy @gcwq workers which have been idle for longer than
1597 * IDLE_WORKER_TIMEOUT.
1598 *
1599 * LOCKING:
1600 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1601 * multiple times.  Called only from manager.
1602 *
1603 * RETURNS:
1604 * false if no action was taken and gcwq->lock stayed locked, true
1605 * otherwise.
1606 */
1607static bool maybe_destroy_workers(struct global_cwq *gcwq)
1608{
1609        bool ret = false;
1610
1611        while (too_many_workers(gcwq)) {
1612                struct worker *worker;
1613                unsigned long expires;
1614
1615                worker = list_entry(gcwq->idle_list.prev, struct worker, entry);
1616                expires = worker->last_active + IDLE_WORKER_TIMEOUT;
1617
1618                if (time_before(jiffies, expires)) {
1619                        mod_timer(&gcwq->idle_timer, expires);
1620                        break;
1621                }
1622
1623                destroy_worker(worker);
1624                ret = true;
1625        }
1626
1627        return ret;
1628}
1629
1630/**
1631 * manage_workers - manage worker pool
1632 * @worker: self
1633 *
1634 * Assume the manager role and manage gcwq worker pool @worker belongs
1635 * to.  At any given time, there can be at most one manager per
1636 * gcwq.  The exclusion is handled automatically by this function.
1637 *
1638 * The caller can safely start processing works on false return.  On
1639 * true return, it's guaranteed that need_to_create_worker() is false
1640 * and may_start_working() is true.
1641 *
1642 * CONTEXT:
1643 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1644 * multiple times.  Does GFP_KERNEL allocations.
1645 *
1646 * RETURNS:
1647 * false if no action was taken and gcwq->lock stayed locked, true if
1648 * some action was taken.
1649 */
1650static bool manage_workers(struct worker *worker)
1651{
1652        struct global_cwq *gcwq = worker->gcwq;
1653        bool ret = false;
1654
1655        if (gcwq->flags & GCWQ_MANAGING_WORKERS)
1656                return ret;
1657
1658        gcwq->flags &= ~GCWQ_MANAGE_WORKERS;
1659        gcwq->flags |= GCWQ_MANAGING_WORKERS;
1660
1661        /*
1662         * Destroy and then create so that may_start_working() is true
1663         * on return.
1664         */
1665        ret |= maybe_destroy_workers(gcwq);
1666        ret |= maybe_create_worker(gcwq);
1667
1668        gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
1669
1670        /*
1671         * The trustee might be waiting to take over the manager
1672         * position, tell it we're done.
1673         */
1674        if (unlikely(gcwq->trustee))
1675                wake_up_all(&gcwq->trustee_wait);
1676
1677        return ret;
1678}
1679
1680/**
1681 * move_linked_works - move linked works to a list
1682 * @work: start of series of works to be scheduled
1683 * @head: target list to append @work to
1684 * @nextp: out parameter for nested worklist walking
1685 *
1686 * Schedule linked works starting from @work to @head.  Work series to
1687 * be scheduled starts at @work and includes any consecutive work with
1688 * WORK_STRUCT_LINKED set in its predecessor.
1689 *
1690 * If @nextp is not NULL, it's updated to point to the next work of
1691 * the last scheduled work.  This allows move_linked_works() to be
1692 * nested inside an outer list_for_each_entry_safe().
1693 *
1694 * CONTEXT:
1695 * spin_lock_irq(gcwq->lock).
1696 */
1697static void move_linked_works(struct work_struct *work, struct list_head *head,
1698                              struct work_struct **nextp)
1699{
1700        struct work_struct *n;
1701
1702        /*
1703         * A linked worklist always ends before the end of the list;
1704         * use NULL for the list head.
1705         */
1706        list_for_each_entry_safe_from(work, n, NULL, entry) {
1707                list_move_tail(&work->entry, head);
1708                if (!(*work_data_bits(work) & WORK_STRUCT_LINKED))
1709                        break;
1710        }
1711
1712        /*
1713         * If we're already inside safe list traversal and have moved
1714         * multiple works to the scheduled queue, the next position
1715         * needs to be updated.
1716         */
1717        if (nextp)
1718                *nextp = n;
1719}
1720
1721static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
1722{
1723        struct work_struct *work = list_first_entry(&cwq->delayed_works,
1724                                                    struct work_struct, entry);
1725        struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
1726
1727        trace_workqueue_activate_work(work);
1728        move_linked_works(work, pos, NULL);
1729        __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
1730        cwq->nr_active++;
1731}
1732
1733/**
1734 * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
1735 * @cwq: cwq of interest
1736 * @color: color of work which left the queue
1737 * @delayed: for a delayed work
1738 *
1739 * A work either has completed or is removed from pending queue,
1740 * decrement nr_in_flight of its cwq and handle workqueue flushing.
1741 *
1742 * CONTEXT:
1743 * spin_lock_irq(gcwq->lock).
1744 */
1745static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
1746                                 bool delayed)
1747{
1748        /* ignore uncolored works */
1749        if (color == WORK_NO_COLOR)
1750                return;
1751
1752        cwq->nr_in_flight[color]--;
1753
1754        if (!delayed) {
1755                cwq->nr_active--;
1756                if (!list_empty(&cwq->delayed_works)) {
1757                        /* one down, submit a delayed one */
1758                        if (cwq->nr_active < cwq->max_active)
1759                                cwq_activate_first_delayed(cwq);
1760                }
1761        }
1762
1763        /* is flush in progress and are we at the flushing tip? */
1764        if (likely(cwq->flush_color != color))
1765                return;
1766
1767        /* are there still in-flight works? */
1768        if (cwq->nr_in_flight[color])
1769                return;
1770
1771        /* this cwq is done, clear flush_color */
1772        cwq->flush_color = -1;
1773
1774        /*
1775         * If this was the last cwq, wake up the first flusher.  It
1776         * will handle the rest.
1777         */
1778        if (atomic_dec_and_test(&cwq->wq->nr_cwqs_to_flush))
1779                complete(&cwq->wq->first_flusher->done);
1780}
1781
1782/**
1783 * process_one_work - process single work
1784 * @worker: self
1785 * @work: work to process
1786 *
1787 * Process @work.  This function contains all the logic necessary to
1788 * process a single work including synchronization against and
1789 * interaction with other workers on the same cpu, queueing and
1790 * flushing.  As long as context requirement is met, any worker can
1791 * call this function to process a work.
1792 *
1793 * CONTEXT:
1794 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
1795 */
1796static void process_one_work(struct worker *worker, struct work_struct *work)
1797__releases(&gcwq->lock)
1798__acquires(&gcwq->lock)
1799{
1800        struct cpu_workqueue_struct *cwq = get_work_cwq(work);
1801        struct global_cwq *gcwq = cwq->gcwq;
1802        struct hlist_head *bwh = busy_worker_head(gcwq, work);
1803        bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE;
1804        work_func_t f = work->func;
1805        int work_color;
1806        struct worker *collision;
1807#ifdef CONFIG_LOCKDEP
1808        /*
1809         * It is permissible to free the struct work_struct from
1810         * inside the function that is called from it; lockdep needs to
1811         * take this into account too.  To avoid bogus "held
1812         * lock freed" warnings as well as problems when looking into
1813         * work->lockdep_map, make a copy and use that here.
1814         */
1815        struct lockdep_map lockdep_map = work->lockdep_map;
1816#endif
1817        /*
1818         * A single work shouldn't be executed concurrently by
1819         * multiple workers on a single cpu.  Check whether anyone is
1820         * already processing the work.  If so, defer the work to the
1821         * currently executing one.
1822         */
1823        collision = __find_worker_executing_work(gcwq, bwh, work);
1824        if (unlikely(collision)) {
1825                move_linked_works(work, &collision->scheduled, NULL);
1826                return;
1827        }
1828
1829        /* claim and process */
1830        debug_work_deactivate(work);
1831        hlist_add_head(&worker->hentry, bwh);
1832        worker->current_work = work;
1833        worker->current_cwq = cwq;
1834        work_color = get_work_color(work);
1835
1836        /* record the current cpu number in the work data and dequeue */
1837        set_work_cpu(work, gcwq->cpu);
1838        list_del_init(&work->entry);
1839
1840        /*
1841         * If HIGHPRI_PENDING, check the next work, and, if HIGHPRI,
1842         * wake up another worker; otherwise, clear HIGHPRI_PENDING.
1843         */
1844        if (unlikely(gcwq->flags & GCWQ_HIGHPRI_PENDING)) {
1845                struct work_struct *nwork = list_first_entry(&gcwq->worklist,
1846                                                struct work_struct, entry);
1847
1848                if (!list_empty(&gcwq->worklist) &&
1849                    get_work_cwq(nwork)->wq->flags & WQ_HIGHPRI)
1850                        wake_up_worker(gcwq);
1851                else
1852                        gcwq->flags &= ~GCWQ_HIGHPRI_PENDING;
1853        }
1854
1855        /*
1856         * CPU intensive works don't participate in concurrency
1857         * management.  They're the scheduler's responsibility.
1858         */
1859        if (unlikely(cpu_intensive))
1860                worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
1861
1862        spin_unlock_irq(&gcwq->lock);
1863
1864        work_clear_pending(work);
1865        lock_map_acquire_read(&cwq->wq->lockdep_map);
1866        lock_map_acquire(&lockdep_map);
1867        trace_workqueue_execute_start(work);
1868        f(work);
1869        /*
1870         * While we must be careful to not use "work" after this, the trace
1871         * point will only record its address.
1872         */
1873        trace_workqueue_execute_end(work);
1874        lock_map_release(&lockdep_map);
1875        lock_map_release(&cwq->wq->lockdep_map);
1876
1877        if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
1878                printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
1879                       "%s/0x%08x/%d\n",
1880                       current->comm, preempt_count(), task_pid_nr(current));
1881                printk(KERN_ERR "    last function: ");
1882                print_symbol("%s\n", (unsigned long)f);
1883                debug_show_held_locks(current);
1884                dump_stack();
1885        }
1886
1887        spin_lock_irq(&gcwq->lock);
1888
1889        /* clear cpu intensive status */
1890        if (unlikely(cpu_intensive))
1891                worker_clr_flags(worker, WORKER_CPU_INTENSIVE);
1892
1893        /* we're done with it, release */
1894        hlist_del_init(&worker->hentry);
1895        worker->current_work = NULL;
1896        worker->current_cwq = NULL;
1897        cwq_dec_nr_in_flight(cwq, work_color, false);
1898}
1899
1900/**
1901 * process_scheduled_works - process scheduled works
1902 * @worker: self
1903 *
1904 * Process all scheduled works.  Please note that the scheduled list
1905 * may change while processing a work, so this function repeatedly
1906 * fetches a work from the top and executes it.
1907 *
1908 * CONTEXT:
1909 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
1910 * multiple times.
1911 */
1912static void process_scheduled_works(struct worker *worker)
1913{
1914        while (!list_empty(&worker->scheduled)) {
1915                struct work_struct *work = list_first_entry(&worker->scheduled,
1916                                                struct work_struct, entry);
1917                process_one_work(worker, work);
1918        }
1919}
1920
1921/**
1922 * worker_thread - the worker thread function
1923 * @__worker: self
1924 *
1925 * The gcwq worker thread function.  There's a single dynamic pool of
1926 * these per cpu.  These workers process all works regardless of
1927 * their specific target workqueue.  The only exception is works which
1928 * belong to workqueues with a rescuer, which is explained in
1929 * rescuer_thread().
1930 */
1931static int worker_thread(void *__worker)
1932{
1933        struct worker *worker = __worker;
1934        struct global_cwq *gcwq = worker->gcwq;
1935
1936        /* tell the scheduler that this is a workqueue worker */
1937        worker->task->flags |= PF_WQ_WORKER;
1938woke_up:
1939        spin_lock_irq(&gcwq->lock);
1940
1941        /* DIE can be set only while we're idle, checking here is enough */
1942        if (worker->flags & WORKER_DIE) {
1943                spin_unlock_irq(&gcwq->lock);
1944                worker->task->flags &= ~PF_WQ_WORKER;
1945                return 0;
1946        }
1947
1948        worker_leave_idle(worker);
1949recheck:
1950        /* no more worker necessary? */
1951        if (!need_more_worker(gcwq))
1952                goto sleep;
1953
1954        /* do we need to manage? */
1955        if (unlikely(!may_start_working(gcwq)) && manage_workers(worker))
1956                goto recheck;
1957
1958        /*
1959         * ->scheduled list can only be filled while a worker is
1960         * preparing to process a work or actually processing it.
1961         * Make sure nobody diddled with it while I was sleeping.
1962         */
1963        BUG_ON(!list_empty(&worker->scheduled));
1964
1965        /*
1966         * When control reaches this point, we're guaranteed to have
1967         * at least one idle worker or that someone else has already
1968         * assumed the manager role.
1969         */
1970        worker_clr_flags(worker, WORKER_PREP);
1971
1972        do {
1973                struct work_struct *work =
1974                        list_first_entry(&gcwq->worklist,
1975                                         struct work_struct, entry);
1976
1977                if (likely(!(*work_data_bits(work) & WORK_STRUCT_LINKED))) {
1978                        /* optimization path, not strictly necessary */
1979                        process_one_work(worker, work);
1980                        if (unlikely(!list_empty(&worker->scheduled)))
1981                                process_scheduled_works(worker);
1982                } else {
1983                        move_linked_works(work, &worker->scheduled, NULL);
1984                        process_scheduled_works(worker);
1985                }
1986        } while (keep_working(gcwq));
1987
1988        worker_set_flags(worker, WORKER_PREP, false);
1989sleep:
1990        if (unlikely(need_to_manage_workers(gcwq)) && manage_workers(worker))
1991                goto recheck;
1992
1993        /*
1994         * gcwq->lock is held and there's no work to process and no
1995         * need to manage, sleep.  Workers are woken up only while
1996         * holding gcwq->lock or from local cpu, so setting the
1997         * current state before releasing gcwq->lock is enough to
1998         * prevent losing any event.
1999         */
2000        worker_enter_idle(worker);
2001        __set_current_state(TASK_INTERRUPTIBLE);
2002        spin_unlock_irq(&gcwq->lock);
2003        schedule();
2004        goto woke_up;
2005}
2006
2007/**
2008 * rescuer_thread - the rescuer thread function
2009 * @__wq: the associated workqueue
2010 *
2011 * Workqueue rescuer thread function.  There's one rescuer for each
2012 * workqueue which has WQ_RESCUER set.
2013 *
2014 * Regular work processing on a gcwq may block trying to create a new
2015 * worker, which uses a GFP_KERNEL allocation and thus has a slight
2016 * chance of developing into a deadlock if some works currently on the
2017 * same queue need to be processed to satisfy that allocation.  This is
2018 * the problem the rescuer solves.
2019 *
2020 * When such a condition is possible, the gcwq summons the rescuers of all
2021 * workqueues which have works queued on the gcwq and lets them process
2022 * those works so that forward progress can be guaranteed.
2023 *
2024 * This should happen rarely.
2025 */
2026static int rescuer_thread(void *__wq)
2027{
2028        struct workqueue_struct *wq = __wq;
2029        struct worker *rescuer = wq->rescuer;
2030        struct list_head *scheduled = &rescuer->scheduled;
2031        bool is_unbound = wq->flags & WQ_UNBOUND;
2032        unsigned int cpu;
2033
2034        set_user_nice(current, RESCUER_NICE_LEVEL);
2035repeat:
2036        set_current_state(TASK_INTERRUPTIBLE);
2037
2038        if (kthread_should_stop())
2039                return 0;
2040
2041        /*
2042         * See whether any cpu is asking for help.  Unbound
2043         * workqueues use cpu 0 in mayday_mask for WORK_CPU_UNBOUND.
2044         */
2045        for_each_mayday_cpu(cpu, wq->mayday_mask) {
2046                unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
2047                struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq);
2048                struct global_cwq *gcwq = cwq->gcwq;
2049                struct work_struct *work, *n;
2050
2051                __set_current_state(TASK_RUNNING);
2052                mayday_clear_cpu(cpu, wq->mayday_mask);
2053
2054                /* migrate to the target cpu if possible */
2055                rescuer->gcwq = gcwq;
2056                worker_maybe_bind_and_lock(rescuer);
2057
2058                /*
2059                 * Slurp in all works issued via this workqueue and
2060                 * process'em.
2061                 */
2062                BUG_ON(!list_empty(&rescuer->scheduled));
2063                list_for_each_entry_safe(work, n, &gcwq->worklist, entry)
2064                        if (get_work_cwq(work) == cwq)
2065                                move_linked_works(work, scheduled, &n);
2066
2067                process_scheduled_works(rescuer);
2068
2069                /*
2070                 * Leave this gcwq.  If keep_working() is %true, notify a
2071                 * regular worker; otherwise, we'd end up with 0 concurrency
2072                 * and stall execution.
2073                 */
2074                if (keep_working(gcwq))
2075                        wake_up_worker(gcwq);
2076
2077                spin_unlock_irq(&gcwq->lock);
2078        }
2079
2080        schedule();
2081        goto repeat;
2082}
2083
2084struct wq_barrier {
2085        struct work_struct      work;
2086        struct completion       done;
2087};
2088
2089static void wq_barrier_func(struct work_struct *work)
2090{
2091        struct wq_barrier *barr = container_of(work, struct wq_barrier, work);
2092        complete(&barr->done);
2093}
2094
2095/**
2096 * insert_wq_barrier - insert a barrier work
2097 * @cwq: cwq to insert barrier into
2098 * @barr: wq_barrier to insert
2099 * @target: target work to attach @barr to
2100 * @worker: worker currently executing @target, NULL if @target is not executing
2101 *
2102 * @barr is linked to @target such that @barr is completed only after
2103 * @target finishes execution.  Please note that the ordering
2104 * guarantee is observed only with respect to @target and on the local
2105 * cpu.
2106 *
2107 * Currently, a queued barrier can't be canceled.  This is because
2108 * try_to_grab_pending() can't determine whether the work to be
2109 * grabbed is at the head of the queue and thus can't clear the LINKED
2110 * flag of the previous work, while there must be a valid next work
2111 * after a work with the LINKED flag set.
2112 *
2113 * Note that when @worker is non-NULL, @target may be modified
2114 * underneath us, so we can't reliably determine cwq from @target.
2115 *
2116 * CONTEXT:
2117 * spin_lock_irq(gcwq->lock).
2118 */
2119static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
2120                              struct wq_barrier *barr,
2121                              struct work_struct *target, struct worker *worker)
2122{
2123        struct list_head *head;
2124        unsigned int linked = 0;
2125
2126        /*
2127         * debugobject calls are safe here even with gcwq->lock locked
2128         * as we know for sure that this will not trigger any of the
2129         * checks and call back into the fixup functions where we
2130         * might deadlock.
2131         */
2132        INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
2133        __set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
2134        init_completion(&barr->done);
2135
2136        /*
2137         * If @target is currently being executed, schedule the
2138         * barrier to the worker; otherwise, put it after @target.
2139         */
2140        if (worker)
2141                head = worker->scheduled.next;
2142        else {
2143                unsigned long *bits = work_data_bits(target);
2144
2145                head = target->entry.next;
2146                /* there can already be other linked works, inherit and set */
2147                linked = *bits & WORK_STRUCT_LINKED;
2148                __set_bit(WORK_STRUCT_LINKED_BIT, bits);
2149        }
2150
2151        debug_work_activate(&barr->work);
2152        insert_work(cwq, &barr->work, head,
2153                    work_color_to_flags(WORK_NO_COLOR) | linked);
2154}
2155
2156/**
2157 * flush_workqueue_prep_cwqs - prepare cwqs for workqueue flushing
2158 * @wq: workqueue being flushed
2159 * @flush_color: new flush color, < 0 for no-op
2160 * @work_color: new work color, < 0 for no-op
2161 *
2162 * Prepare cwqs for workqueue flushing.
2163 *
2164 * If @flush_color is non-negative, flush_color on all cwqs should be
2165 * -1.  If no cwq has in-flight commands at the specified color, all
2166 * cwq->flush_color's stay at -1 and %false is returned.  If any cwq
2167 * has in flight commands, its cwq->flush_color is set to
2168 * @flush_color, @wq->nr_cwqs_to_flush is updated accordingly, cwq
2169 * wakeup logic is armed and %true is returned.
2170 *
2171 * The caller should have initialized @wq->first_flusher prior to
2172 * calling this function with non-negative @flush_color.  If
2173 * @flush_color is negative, no flush color update is done and %false
2174 * is returned.
2175 *
2176 * If @work_color is non-negative, all cwqs should have the same
2177 * work_color which is previous to @work_color and all will be
2178 * advanced to @work_color.
2179 *
2180 * CONTEXT:
2181 * mutex_lock(wq->flush_mutex).
2182 *
2183 * RETURNS:
2184 * %true if @flush_color >= 0 and there's something to flush.  %false
2185 * otherwise.
2186 */
2187static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq,
2188                                      int flush_color, int work_color)
2189{
2190        bool wait = false;
2191        unsigned int cpu;
2192
2193        if (flush_color >= 0) {
2194                BUG_ON(atomic_read(&wq->nr_cwqs_to_flush));
2195                atomic_set(&wq->nr_cwqs_to_flush, 1);
2196        }
2197
2198        for_each_cwq_cpu(cpu, wq) {
2199                struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2200                struct global_cwq *gcwq = cwq->gcwq;
2201
2202                spin_lock_irq(&gcwq->lock);
2203
2204                if (flush_color >= 0) {
2205                        BUG_ON(cwq->flush_color != -1);
2206
2207                        if (cwq->nr_in_flight[flush_color]) {
2208                                cwq->flush_color = flush_color;
2209                                atomic_inc(&wq->nr_cwqs_to_flush);
2210                                wait = true;
2211                        }
2212                }
2213
2214                if (work_color >= 0) {
2215                        BUG_ON(work_color != work_next_color(cwq->work_color));
2216                        cwq->work_color = work_color;
2217                }
2218
2219                spin_unlock_irq(&gcwq->lock);
2220        }
2221
2222        if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush))
2223                complete(&wq->first_flusher->done);
2224
2225        return wait;
2226}
2227
2228/**
2229 * flush_workqueue - ensure that any scheduled work has run to completion.
2230 * @wq: workqueue to flush
2231 *
2232 * Forces execution of the workqueue and blocks until its completion.
2233 * This is typically used in driver shutdown handlers.
2234 *
2235 * We sleep until all works which were queued on entry have been handled,
2236 * but we are not livelocked by new incoming ones.
2237 */
2238void flush_workqueue(struct workqueue_struct *wq)
2239{
2240        struct wq_flusher this_flusher = {
2241                .list = LIST_HEAD_INIT(this_flusher.list),
2242                .flush_color = -1,
2243                .done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
2244        };
2245        int next_color;
2246
2247        lock_map_acquire(&wq->lockdep_map);
2248        lock_map_release(&wq->lockdep_map);
2249
2250        mutex_lock(&wq->flush_mutex);
2251
2252        /*
2253         * Start-to-wait phase
2254         */
2255        next_color = work_next_color(wq->work_color);
2256
2257        if (next_color != wq->flush_color) {
2258                /*
2259                 * Color space is not full.  The current work_color
2260                 * becomes our flush_color and work_color is advanced
2261                 * by one.
2262                 */
2263                BUG_ON(!list_empty(&wq->flusher_overflow));
2264                this_flusher.flush_color = wq->work_color;
2265                wq->work_color = next_color;
2266
2267                if (!wq->first_flusher) {
2268                        /* no flush in progress, become the first flusher */
2269                        BUG_ON(wq->flush_color != this_flusher.flush_color);
2270
2271                        wq->first_flusher = &this_flusher;
2272
2273                        if (!flush_workqueue_prep_cwqs(wq, wq->flush_color,
2274                                                       wq->work_color)) {
2275                                /* nothing to flush, done */
2276                                wq->flush_color = next_color;
2277                                wq->first_flusher = NULL;
2278                                goto out_unlock;
2279                        }
2280                } else {
2281                        /* wait in queue */
2282                        BUG_ON(wq->flush_color == this_flusher.flush_color);
2283                        list_add_tail(&this_flusher.list, &wq->flusher_queue);
2284                        flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
2285                }
2286        } else {
2287                /*
2288                 * Oops, color space is full, wait on overflow queue.
2289                 * The next flush completion will assign us
2290                 * flush_color and transfer to flusher_queue.
2291                 */
2292                list_add_tail(&this_flusher.list, &wq->flusher_overflow);
2293        }
2294
2295        mutex_unlock(&wq->flush_mutex);
2296
2297        wait_for_completion(&this_flusher.done);
2298
2299        /*
2300         * Wake-up-and-cascade phase
2301         *
2302         * First flushers are responsible for cascading flushes and
2303         * handling overflow.  Non-first flushers can simply return.
2304         */
2305        if (wq->first_flusher != &this_flusher)
2306                return;
2307
2308        mutex_lock(&wq->flush_mutex);
2309
2310        /* we might have raced, check again with mutex held */
2311        if (wq->first_flusher != &this_flusher)
2312                goto out_unlock;
2313
2314        wq->first_flusher = NULL;
2315
2316        BUG_ON(!list_empty(&this_flusher.list));
2317        BUG_ON(wq->flush_color != this_flusher.flush_color);
2318
2319        while (true) {
2320                struct wq_flusher *next, *tmp;
2321
2322                /* complete all the flushers sharing the current flush color */
2323                list_for_each_entry_safe(next, tmp, &wq->flusher_queue, list) {
2324                        if (next->flush_color != wq->flush_color)
2325                                break;
2326                        list_del_init(&next->list);
2327                        complete(&next->done);
2328                }
2329
2330                BUG_ON(!list_empty(&wq->flusher_overflow) &&
2331                       wq->flush_color != work_next_color(wq->work_color));
2332
2333                /* this flush_color is finished, advance by one */
2334                wq->flush_color = work_next_color(wq->flush_color);
2335
2336                /* one color has been freed, handle overflow queue */
2337                if (!list_empty(&wq->flusher_overflow)) {
2338                        /*
2339                         * Assign the same color to all overflowed
2340                         * flushers, advance work_color and append to
2341                         * flusher_queue.  This is the start-to-wait
2342                         * phase for these overflowed flushers.
2343                         */
2344                        list_for_each_entry(tmp, &wq->flusher_overflow, list)
2345                                tmp->flush_color = wq->work_color;
2346
2347                        wq->work_color = work_next_color(wq->work_color);
2348
2349                        list_splice_tail_init(&wq->flusher_overflow,
2350                                              &wq->flusher_queue);
2351                        flush_workqueue_prep_cwqs(wq, -1, wq->work_color);
2352                }
2353
2354                if (list_empty(&wq->flusher_queue)) {
2355                        BUG_ON(wq->flush_color != wq->work_color);
2356                        break;
2357                }
2358
2359                /*
2360                 * Need to flush more colors.  Make the next flusher
2361                 * the new first flusher and arm cwqs.
2362                 */
2363                BUG_ON(wq->flush_color == wq->work_color);
2364                BUG_ON(wq->flush_color != next->flush_color);
2365
2366                list_del_init(&next->list);
2367                wq->first_flusher = next;
2368
2369                if (flush_workqueue_prep_cwqs(wq, wq->flush_color, -1))
2370                        break;
2371
2372                /*
2373                 * Meh... this color is already done, clear first
2374                 * flusher and repeat cascading.
2375                 */
2376                wq->first_flusher = NULL;
2377        }
2378
2379out_unlock:
2380        mutex_unlock(&wq->flush_mutex);
2381}
2382EXPORT_SYMBOL_GPL(flush_workqueue);
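/*
 * Illustrative sketch: one way the "driver shutdown" use mentioned above
 * might look.  struct mydrv, its ->wq member and mydrv_shutdown() are
 * hypothetical names, not taken from this file.
 */
#if 0	/* example only, never compiled */
struct mydrv {
        struct workqueue_struct *wq;
};

static void mydrv_shutdown(struct mydrv *drv)
{
        /*
         * Sleeps until every work item queued on drv->wq up to this
         * point has been handled; works queued afterwards are not
         * waited for.
         */
        flush_workqueue(drv->wq);
}
#endif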
2383
2384static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
2385                             bool wait_executing)
2386{
2387        struct worker *worker = NULL;
2388        struct global_cwq *gcwq;
2389        struct cpu_workqueue_struct *cwq;
2390
2391        might_sleep();
2392        gcwq = get_work_gcwq(work);
2393        if (!gcwq)
2394                return false;
2395
2396        spin_lock_irq(&gcwq->lock);
2397        if (!list_empty(&work->entry)) {
2398                /*
2399                 * See the comment near try_to_grab_pending()->smp_rmb().
2400                 * If it was re-queued to a different gcwq under us, we
2401                 * are not going to wait.
2402                 */
2403                smp_rmb();
2404                cwq = get_work_cwq(work);
2405                if (unlikely(!cwq || gcwq != cwq->gcwq))
2406                        goto already_gone;
2407        } else if (wait_executing) {
2408                worker = find_worker_executing_work(gcwq, work);
2409                if (!worker)
2410                        goto already_gone;
2411                cwq = worker->current_cwq;
2412        } else
2413                goto already_gone;
2414
2415        insert_wq_barrier(cwq, barr, work, worker);
2416        spin_unlock_irq(&gcwq->lock);
2417
2418        /*
2419         * If @max_active is 1 or rescuer is in use, flushing another work
2420         * item on the same workqueue may lead to deadlock.  Make sure the
2421         * flusher is not running on the same workqueue by verifying write
2422         * access.
2423         */
2424        if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER)
2425                lock_map_acquire(&cwq->wq->lockdep_map);
2426        else
2427                lock_map_acquire_read(&cwq->wq->lockdep_map);
2428        lock_map_release(&cwq->wq->lockdep_map);
2429
2430        return true;
2431already_gone:
2432        spin_unlock_irq(&gcwq->lock);
2433        return false;
2434}
2435
2436/**
2437 * flush_work - wait for a work to finish executing the last queueing instance
2438 * @work: the work to flush
2439 *
2440 * Wait until @work has finished execution.  This function considers
2441 * only the last queueing instance of @work.  If @work has been
2442 * enqueued across different CPUs on a non-reentrant workqueue or on
2443 * multiple workqueues, @work might still be executing on return on
2444 * some of the CPUs from earlier queueing.
2445 *
2446 * If @work was queued only on a non-reentrant, ordered or unbound
2447 * workqueue, @work is guaranteed to be idle on return if it hasn't
2448 * been requeued since flush started.
2449 *
2450 * RETURNS:
2451 * %true if flush_work() waited for the work to finish execution,
2452 * %false if it was already idle.
2453 */
2454bool flush_work(struct work_struct *work)
2455{
2456        struct wq_barrier barr;
2457
2458        if (start_flush_work(work, &barr, true)) {
2459                wait_for_completion(&barr.done);
2460                destroy_work_on_stack(&barr.work);
2461                return true;
2462        } else
2463                return false;
2464}
2465EXPORT_SYMBOL_GPL(flush_work);
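/*
 * Illustrative sketch: waiting for the last queueing instance of one
 * specific work item before consuming its result.  struct mydev and its
 * members are hypothetical.
 */
#if 0	/* example only, never compiled */
struct mydev {
        struct work_struct      update_work;    /* refreshes cached_value */
        int                     cached_value;
};

static int mydev_read_value(struct mydev *dev)
{
        /* make sure the most recently queued update has finished */
        flush_work(&dev->update_work);
        return dev->cached_value;
}
#endif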
2466
2467static bool wait_on_cpu_work(struct global_cwq *gcwq, struct work_struct *work)
2468{
2469        struct wq_barrier barr;
2470        struct worker *worker;
2471
2472        spin_lock_irq(&gcwq->lock);
2473
2474        worker = find_worker_executing_work(gcwq, work);
2475        if (unlikely(worker))
2476                insert_wq_barrier(worker->current_cwq, &barr, work, worker);
2477
2478        spin_unlock_irq(&gcwq->lock);
2479
2480        if (unlikely(worker)) {
2481                wait_for_completion(&barr.done);
2482                destroy_work_on_stack(&barr.work);
2483                return true;
2484        } else
2485                return false;
2486}
2487
2488static bool wait_on_work(struct work_struct *work)
2489{
2490        bool ret = false;
2491        int cpu;
2492
2493        might_sleep();
2494
2495        lock_map_acquire(&work->lockdep_map);
2496        lock_map_release(&work->lockdep_map);
2497
2498        for_each_gcwq_cpu(cpu)
2499                ret |= wait_on_cpu_work(get_gcwq(cpu), work);
2500        return ret;
2501}
2502
2503/**
2504 * flush_work_sync - wait until a work has finished execution
2505 * @work: the work to flush
2506 *
2507 * Wait until @work has finished execution.  On return, it's
2508 * guaranteed that all queueing instances of @work which happened
2509 * before this function is called are finished.  In other words, if
2510 * @work hasn't been requeued since this function was called, @work is
2511 * guaranteed to be idle on return.
2512 *
2513 * RETURNS:
2514 * %true if flush_work_sync() waited for the work to finish execution,
2515 * %false if it was already idle.
2516 */
2517bool flush_work_sync(struct work_struct *work)
2518{
2519        struct wq_barrier barr;
2520        bool pending, waited;
2521
2522        /* we'll wait for executions separately, queue barr only if pending */
2523        pending = start_flush_work(work, &barr, false);
2524
2525        /* wait for executions to finish */
2526        waited = wait_on_work(work);
2527
2528        /* wait for the pending one */
2529        if (pending) {
2530                wait_for_completion(&barr.done);
2531                destroy_work_on_stack(&barr.work);
2532        }
2533
2534        return pending || waited;
2535}
2536EXPORT_SYMBOL_GPL(flush_work_sync);
2537
2538/*
2539 * Upon a successful return (>= 0), the caller "owns" the WORK_STRUCT_PENDING
2540 * bit, so this work can't be re-armed in any way.
2541 */
2542static int try_to_grab_pending(struct work_struct *work)
2543{
2544        struct global_cwq *gcwq;
2545        int ret = -1;
2546
2547        if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
2548                return 0;
2549
2550        /*
2551         * The queueing is in progress, or it is already queued. Try to
2552         * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
2553         */
2554        gcwq = get_work_gcwq(work);
2555        if (!gcwq)
2556                return ret;
2557
2558        spin_lock_irq(&gcwq->lock);
2559        if (!list_empty(&work->entry)) {
2560                /*
2561                 * This work is queued, but perhaps we locked the wrong gcwq.
2562                 * In that case we must see the new value after rmb(), see
2563                 * insert_work()->wmb().
2564                 */
2565                smp_rmb();
2566                if (gcwq == get_work_gcwq(work)) {
2567                        debug_work_deactivate(work);
2568                        list_del_init(&work->entry);
2569                        cwq_dec_nr_in_flight(get_work_cwq(work),
2570                                get_work_color(work),
2571                                *work_data_bits(work) & WORK_STRUCT_DELAYED);
2572                        ret = 1;
2573                }
2574        }
2575        spin_unlock_irq(&gcwq->lock);
2576
2577        return ret;
2578}
2579
2580static bool __cancel_work_timer(struct work_struct *work,
2581                                struct timer_list* timer)
2582{
2583        int ret;
2584
2585        do {
2586                ret = (timer && likely(del_timer(timer)));
2587                if (!ret)
2588                        ret = try_to_grab_pending(work);
2589                wait_on_work(work);
2590        } while (unlikely(ret < 0));
2591
2592        clear_work_data(work);
2593        return ret;
2594}
2595
2596/**
2597 * cancel_work_sync - cancel a work and wait for it to finish
2598 * @work: the work to cancel
2599 *
2600 * Cancel @work and wait for its execution to finish.  This function
2601 * can be used even if the work re-queues itself or migrates to
2602 * another workqueue.  On return from this function, @work is
2603 * guaranteed to be not pending or executing on any CPU.
2604 *
2605 * cancel_work_sync(&delayed_work->work) must not be used for
2606 * delayed_work's.  Use cancel_delayed_work_sync() instead.
2607 *
2608 * The caller must ensure that the workqueue on which @work was last
2609 * queued can't be destroyed before this function returns.
2610 *
2611 * RETURNS:
2612 * %true if @work was pending, %false otherwise.
2613 */
2614bool cancel_work_sync(struct work_struct *work)
2615{
2616        return __cancel_work_timer(work, NULL);
2617}
2618EXPORT_SYMBOL_GPL(cancel_work_sync);
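/*
 * Illustrative sketch: stopping a work item which may re-queue itself,
 * as described above.  The mydev names are hypothetical.
 */
#if 0	/* example only, never compiled */
struct mydev {
        struct work_struct      event_work;
        bool                    more_events;
};

static void mydev_event_work(struct work_struct *work)
{
        struct mydev *dev = container_of(work, struct mydev, event_work);

        /* ... process events ... */
        if (dev->more_events)
                schedule_work(&dev->event_work);        /* re-arms itself */
}

static void mydev_remove(struct mydev *dev)
{
        /* on return the work is neither pending nor running anywhere */
        cancel_work_sync(&dev->event_work);
}
#endif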
2619
2620/**
2621 * flush_delayed_work - wait for a dwork to finish executing the last queueing
2622 * @dwork: the delayed work to flush
2623 *
2624 * Delayed timer is cancelled and the pending work is queued for
2625 * immediate execution.  Like flush_work(), this function only
2626 * considers the last queueing instance of @dwork.
2627 *
2628 * RETURNS:
2629 * %true if flush_work() waited for the work to finish execution,
2630 * %false if it was already idle.
2631 */
2632bool flush_delayed_work(struct delayed_work *dwork)
2633{
2634        if (del_timer_sync(&dwork->timer))
2635                __queue_work(raw_smp_processor_id(),
2636                             get_work_cwq(&dwork->work)->wq, &dwork->work);
2637        return flush_work(&dwork->work);
2638}
2639EXPORT_SYMBOL(flush_delayed_work);
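/*
 * Illustrative sketch: expediting a pending delayed work instead of
 * waiting for its timer, e.g. to push out one last update before
 * suspend.  struct mydev and mydev_suspend() are hypothetical.
 */
#if 0	/* example only, never compiled */
struct mydev {
        struct delayed_work     stats_work;
};

static int mydev_suspend(struct mydev *dev)
{
        /* queue any pending delayed update immediately and wait for it */
        flush_delayed_work(&dev->stats_work);
        return 0;
}
#endif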
2640
2641/**
2642 * flush_delayed_work_sync - wait for a dwork to finish
2643 * @dwork: the delayed work to flush
2644 *
2645 * Delayed timer is cancelled and the pending work is queued for
2646 * execution immediately.  Other than timer handling, its behavior
2647 * is identical to flush_work_sync().
2648 *
2649 * RETURNS:
2650 * %true if flush_work_sync() waited for the work to finish execution,
2651 * %false if it was already idle.
2652 */
2653bool flush_delayed_work_sync(struct delayed_work *dwork)
2654{
2655        if (del_timer_sync(&dwork->timer))
2656                __queue_work(raw_smp_processor_id(),
2657                             get_work_cwq(&dwork->work)->wq, &dwork->work);
2658        return flush_work_sync(&dwork->work);
2659}
2660EXPORT_SYMBOL(flush_delayed_work_sync);
2661
2662/**
2663 * cancel_delayed_work_sync - cancel a delayed work and wait for it to finish
2664 * @dwork: the delayed work to cancel
2665 *
2666 * This is cancel_work_sync() for delayed works.
2667 *
2668 * RETURNS:
2669 * %true if @dwork was pending, %false otherwise.
2670 */
2671bool cancel_delayed_work_sync(struct delayed_work *dwork)
2672{
2673        return __cancel_work_timer(&dwork->work, &dwork->timer);
2674}
2675EXPORT_SYMBOL(cancel_delayed_work_sync);
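/*
 * Illustrative sketch: a self-rearming poll routine torn down with
 * cancel_delayed_work_sync().  The mydev names and the HZ period are
 * hypothetical.
 */
#if 0	/* example only, never compiled */
struct mydev {
        struct delayed_work     poll_work;
};

static void mydev_poll(struct work_struct *work)
{
        struct mydev *dev = container_of(work, struct mydev,
                                         poll_work.work);

        /* ... poll the hardware ... */
        schedule_delayed_work(&dev->poll_work, HZ);     /* re-arm */
}

static void mydev_start(struct mydev *dev)
{
        INIT_DELAYED_WORK(&dev->poll_work, mydev_poll);
        schedule_delayed_work(&dev->poll_work, HZ);
}

static void mydev_stop(struct mydev *dev)
{
        /* kills the timer and waits for a running instance, if any */
        cancel_delayed_work_sync(&dev->poll_work);
}
#endif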
2676
2677/**
2678 * schedule_work - put work task in global workqueue
2679 * @work: job to be done
2680 *
2681 * Returns zero if @work was already on the kernel-global workqueue and
2682 * non-zero otherwise.
2683 *
2684 * This puts a job in the kernel-global workqueue if it was not already
2685 * queued and leaves it in the same position on the kernel-global
2686 * workqueue otherwise.
2687 */
2688int schedule_work(struct work_struct *work)
2689{
2690        return queue_work(system_wq, work);
2691}
2692EXPORT_SYMBOL(schedule_work);
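/*
 * Illustrative sketch: deferring non-atomic processing from an interrupt
 * handler to the kernel-global workqueue.  The mydev names are
 * hypothetical, irqreturn_t assumes <linux/interrupt.h>, and the work
 * item is assumed to have been set up with INIT_WORK() elsewhere.
 */
#if 0	/* example only, never compiled */
struct mydev {
        struct work_struct      event_work;
};

static irqreturn_t mydev_interrupt(int irq, void *data)
{
        struct mydev *dev = data;

        /* hard irq context: can't sleep, punt to process context */
        schedule_work(&dev->event_work);
        return IRQ_HANDLED;
}
#endif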
2693
2694/**
2695 * schedule_work_on - put work task on a specific cpu
2696 * @cpu: cpu to put the work task on
2697 * @work: job to be done
2698 *
2699 * This puts a job on a specific cpu.
2700 */
2701int schedule_work_on(int cpu, struct work_struct *work)
2702{
2703        return queue_work_on(cpu, system_wq, work);
2704}
2705EXPORT_SYMBOL(schedule_work_on);
2706
2707/**
2708 * schedule_delayed_work - put work task in global workqueue after delay
2709 * @dwork: job to be done
2710 * @delay: number of jiffies to wait or 0 for immediate execution
2711 *
2712 * After waiting for a given time this puts a job in the kernel-global
2713 * workqueue.
2714 */
2715int schedule_delayed_work(struct delayed_work *dwork,
2716                                        unsigned long delay)
2717{
2718        return queue_delayed_work(system_wq, dwork, delay);
2719}
2720EXPORT_SYMBOL(schedule_delayed_work);
2721
2722/**
2723 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
2724 * @cpu: cpu to use
2725 * @dwork: job to be done
2726 * @delay: number of jiffies to wait
2727 *
2728 * After waiting for a given time this puts a job in the kernel-global
2729 * workqueue on the specified CPU.
2730 */
2731int schedule_delayed_work_on(int cpu,
2732                        struct delayed_work *dwork, unsigned long delay)
2733{
2734        return queue_delayed_work_on(cpu, system_wq, dwork, delay);
2735}
2736EXPORT_SYMBOL(schedule_delayed_work_on);
2737
2738/**
2739 * schedule_on_each_cpu - execute a function synchronously on each online CPU
2740 * @func: the function to call
2741 *
2742 * schedule_on_each_cpu() executes @func on each online CPU using the
2743 * system workqueue and blocks until all CPUs have completed.
2744 * schedule_on_each_cpu() is very slow.
2745 *
2746 * RETURNS:
2747 * 0 on success, -errno on failure.
2748 */
2749int schedule_on_each_cpu(work_func_t func)
2750{
2751        int cpu;
2752        struct work_struct __percpu *works;
2753
2754        works = alloc_percpu(struct work_struct);
2755        if (!works)
2756                return -ENOMEM;
2757
2758        get_online_cpus();
2759
2760        for_each_online_cpu(cpu) {
2761                struct work_struct *work = per_cpu_ptr(works, cpu);
2762
2763                INIT_WORK(work, func);
2764                schedule_work_on(cpu, work);
2765        }
2766
2767        for_each_online_cpu(cpu)
2768                flush_work(per_cpu_ptr(works, cpu));
2769
2770        put_online_cpus();
2771        free_percpu(works);
2772        return 0;
2773}
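/*
 * Illustrative sketch: draining a per-cpu counter on every online CPU
 * with schedule_on_each_cpu().  The my_cache names are hypothetical and
 * DEFINE_PER_CPU/per_cpu assume <linux/percpu.h>.
 */
#if 0	/* example only, never compiled */
static DEFINE_PER_CPU(int, my_cache);

static void my_cache_drain(struct work_struct *unused)
{
        /* runs in process context on the CPU the work was queued for */
        per_cpu(my_cache, get_cpu()) = 0;
        put_cpu();
}

static int my_cache_drain_all(void)
{
        /* blocks until my_cache_drain() has completed on every online CPU */
        return schedule_on_each_cpu(my_cache_drain);
}
#endif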
2774
2775/**
2776 * flush_scheduled_work - ensure that any scheduled work has run to completion.
2777 *
2778 * Forces execution of the kernel-global workqueue and blocks until its
2779 * completion.
2780 *
2781 * Think twice before calling this function!  It's very easy to get into
2782 * trouble if you don't take great care.  Either of the following situations
2783 * will lead to deadlock:
2784 *
2785 *      One of the work items currently on the workqueue needs to acquire
2786 *      a lock held by your code or its caller.
2787 *
2788 *      Your code is running in the context of a work routine.
2789 *
2790 * They will be detected by lockdep when they occur, but the first might not
2791 * occur very often.  It depends on what work items are on the workqueue and
2792 * what locks they need, which you have no control over.
2793 *
2794 * In most situations flushing the entire workqueue is overkill; you merely
2795 * need to know that a particular work item isn't queued and isn't running.
2796 * In such cases you should use cancel_delayed_work_sync() or
2797 * cancel_work_sync() instead.
2798 */
2799void flush_scheduled_work(void)
2800{
2801        flush_workqueue(system_wq);
2802}
2803EXPORT_SYMBOL(flush_scheduled_work);
2804
2805/**
2806 * execute_in_process_context - reliably execute the routine with user context
2807 * @fn:         the function to execute
2808 * @ew:         guaranteed storage for the execute work structure (must
2809 *              be available when the work executes)
2810 *
2811 * Executes the function immediately if process context is available,
2812 * otherwise schedules the function for delayed execution.
2813 *
2814 * Returns:     0 - function was executed
2815 *              1 - function was scheduled for execution
2816 */
2817int execute_in_process_context(work_func_t fn, struct execute_work *ew)
2818{
2819        if (!in_interrupt()) {
2820                fn(&ew->work);
2821                return 0;
2822        }
2823
2824        INIT_WORK(&ew->work, fn);
2825        schedule_work(&ew->work);
2826
2827        return 1;
2828}
2829EXPORT_SYMBOL_GPL(execute_in_process_context);
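/*
 * Illustrative sketch: a release path which may be entered from either
 * process or interrupt context.  struct my_obj and its functions are
 * hypothetical; the execute_work storage lives in the object itself so
 * it stays valid until the deferred function runs.
 */
#if 0	/* example only, never compiled */
struct my_obj {
        struct execute_work     ew;
        /* ... */
};

static void my_obj_free(struct work_struct *work)
{
        struct my_obj *obj = container_of(work, struct my_obj, ew.work);

        kfree(obj);
}

static void my_obj_release(struct my_obj *obj)
{
        /* frees immediately in process context, otherwise via a work */
        execute_in_process_context(my_obj_free, &obj->ew);
}
#endif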
2830
2831int keventd_up(void)
2832{
2833        return system_wq != NULL;
2834}
2835
2836static int alloc_cwqs(struct workqueue_struct *wq)
2837{
2838        /*
2839         * cwqs are force-aligned according to WORK_STRUCT_FLAG_BITS.
2840         * Make sure that the alignment isn't lower than that of
2841         * unsigned long long.
2842         */
2843        const size_t size = sizeof(struct cpu_workqueue_struct);
2844        const size_t align = max_t(size_t, 1 << WORK_STRUCT_FLAG_BITS,
2845                                   __alignof__(unsigned long long));
2846#ifdef CONFIG_SMP
2847        bool percpu = !(wq->flags & WQ_UNBOUND);
2848#else
2849        bool percpu = false;
2850#endif
2851
2852        if (percpu)
2853                wq->cpu_wq.pcpu = __alloc_percpu(size, align);
2854        else {
2855                void *ptr;
2856
2857                /*
2858                 * Allocate enough room to align cwq and put an extra
2859                 * pointer at the end pointing back to the originally
2860                 * allocated pointer, which will be used to free it.
2861                 */
2862                ptr = kzalloc(size + align + sizeof(void *), GFP_KERNEL);
2863                if (ptr) {
2864                        wq->cpu_wq.single = PTR_ALIGN(ptr, align);
2865                        *(void **)(wq->cpu_wq.single + 1) = ptr;
2866                }
2867        }
2868
2869        /* just in case, make sure it's actually aligned */
2870        BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align));
2871        return wq->cpu_wq.v ? 0 : -ENOMEM;
2872}
2873
2874static void free_cwqs(struct workqueue_struct *wq)
2875{
2876#ifdef CONFIG_SMP
2877        bool percpu = !(wq->flags & WQ_UNBOUND);
2878#else
2879        bool percpu = false;
2880#endif
2881
2882        if (percpu)
2883                free_percpu(wq->cpu_wq.pcpu);
2884        else if (wq->cpu_wq.single) {
2885                /* the pointer to free is stored right after the cwq */
2886                kfree(*(void **)(wq->cpu_wq.single + 1));
2887        }
2888}
2889
2890static int wq_clamp_max_active(int max_active, unsigned int flags,
2891                               const char *name)
2892{
2893        int lim = flags & WQ_UNBOUND ? WQ_UNBOUND_MAX_ACTIVE : WQ_MAX_ACTIVE;
2894
2895        if (max_active < 1 || max_active > lim)
2896                printk(KERN_WARNING "workqueue: max_active %d requested for %s "
2897                       "is out of range, clamping between %d and %d\n",
2898                       max_active, name, 1, lim);
2899
2900        return clamp_val(max_active, 1, lim);
2901}
2902
2903struct workqueue_struct *__alloc_workqueue_key(const char *name,
2904                                               unsigned int flags,
2905                                               int max_active,
2906                                               struct lock_class_key *key,
2907                                               const char *lock_name)
2908{
2909        struct workqueue_struct *wq;
2910        unsigned int cpu;
2911
2912        /*
2913         * Workqueues which may be used during memory reclaim should
2914         * have a rescuer to guarantee forward progress.
2915         */
2916        if (flags & WQ_MEM_RECLAIM)
2917                flags |= WQ_RESCUER;
2918
2919        /*
2920         * Unbound workqueues aren't concurrency managed and should be
2921         * dispatched to workers immediately.
2922         */
2923        if (flags & WQ_UNBOUND)
2924                flags |= WQ_HIGHPRI;
2925
2926        max_active = max_active ?: WQ_DFL_ACTIVE;
2927        max_active = wq_clamp_max_active(max_active, flags, name);
2928
2929        wq = kzalloc(sizeof(*wq), GFP_KERNEL);
2930        if (!wq)
2931                goto err;
2932
2933        wq->flags = flags;
2934        wq->saved_max_active = max_active;
2935        mutex_init(&wq->flush_mutex);
2936        atomic_set(&wq->nr_cwqs_to_flush, 0);
2937        INIT_LIST_HEAD(&wq->flusher_queue);
2938        INIT_LIST_HEAD(&wq->flusher_overflow);
2939
2940        wq->name = name;
2941        lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
2942        INIT_LIST_HEAD(&wq->list);
2943
2944        if (alloc_cwqs(wq) < 0)
2945                goto err;
2946
2947        for_each_cwq_cpu(cpu, wq) {
2948                struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
2949                struct global_cwq *gcwq = get_gcwq(cpu);
2950
2951                BUG_ON((unsigned long)cwq & WORK_STRUCT_FLAG_MASK);
2952                cwq->gcwq = gcwq;
2953                cwq->wq = wq;
2954                cwq->flush_color = -1;
2955                cwq->max_active = max_active;
2956                INIT_LIST_HEAD(&cwq->delayed_works);
2957        }
2958
2959        if (flags & WQ_RESCUER) {
2960                struct worker *rescuer;
2961
2962                if (!alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL))
2963                        goto err;
2964
2965                wq->rescuer = rescuer = alloc_worker();
2966                if (!rescuer)
2967                        goto err;
2968
2969                rescuer->task = kthread_create(rescuer_thread, wq, "%s", name);
2970                if (IS_ERR(rescuer->task))
2971                        goto err;
2972
2973                rescuer->task->flags |= PF_THREAD_BOUND;
2974                wake_up_process(rescuer->task);
2975        }
2976
2977        /*
2978         * workqueue_lock protects global freeze state and workqueues
2979         * list.  Grab it, set max_active accordingly and add the new
2980         * workqueue to workqueues list.
2981         */
2982        spin_lock(&workqueue_lock);
2983
2984        if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
2985                for_each_cwq_cpu(cpu, wq)
2986                        get_cwq(cpu, wq)->max_active = 0;
2987
2988        list_add(&wq->list, &workqueues);
2989
2990        spin_unlock(&workqueue_lock);
2991
2992        return wq;
2993err:
2994        if (wq) {
2995                free_cwqs(wq);
2996                free_mayday_mask(wq->mayday_mask);
2997                kfree(wq->rescuer);
2998                kfree(wq);
2999        }
3000        return NULL;
3001}
3002EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
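/*
 * Illustrative sketch: callers normally reach this function through the
 * alloc_workqueue() wrapper in <linux/workqueue.h>.  The mydrv names are
 * hypothetical; flag and max_active choices follow the rules above
 * (WQ_MEM_RECLAIM adds a rescuer, max_active 0 means the default).
 */
#if 0	/* example only, never compiled */
static struct workqueue_struct *mydrv_wq;
static struct workqueue_struct *mydrv_io_wq;

static int __init mydrv_init(void)
{
        /* ordinary per-cpu workqueue with the default max_active */
        mydrv_wq = alloc_workqueue("mydrv", 0, 0);
        if (!mydrv_wq)
                return -ENOMEM;

        /* used on the memory reclaim path, so it needs a rescuer */
        mydrv_io_wq = alloc_workqueue("mydrv_io", WQ_MEM_RECLAIM, 1);
        if (!mydrv_io_wq) {
                destroy_workqueue(mydrv_wq);
                return -ENOMEM;
        }
        return 0;
}
#endif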
3003
3004/**
3005 * destroy_workqueue - safely terminate a workqueue
3006 * @wq: target workqueue
3007 *
3008 * Safely destroy a workqueue. All work currently pending will be done first.
3009 */
3010void destroy_workqueue(struct workqueue_struct *wq)
3011{
3012        unsigned int flush_cnt = 0;
3013        unsigned int cpu;
3014
3015        /*
3016         * Mark @wq dying and drain all pending works.  Once WQ_DYING is
3017         * set, only chain queueing is allowed.  IOW, only currently
3018         * pending or running work items on @wq can queue further work
3019         * items on it.  @wq is flushed repeatedly until it becomes empty.
3020         * The number of flushing is detemined by the depth of chaining and
3021         * should be relatively short.  Whine if it takes too long.
3022         */
3023        wq->flags |= WQ_DYING;
3024reflush:
3025        flush_workqueue(wq);
3026
3027        for_each_cwq_cpu(cpu, wq) {
3028                struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3029
3030                if (!cwq->nr_active && list_empty(&cwq->delayed_works))
3031                        continue;
3032
3033                if (++flush_cnt == 10 ||
3034                    (flush_cnt % 100 == 0 && flush_cnt <= 1000))
3035                        printk(KERN_WARNING "workqueue %s: flush on "
3036                               "destruction isn't complete after %u tries\n",
3037                               wq->name, flush_cnt);
3038                goto reflush;
3039        }
3040
3041        /*
3042         * wq list is used to freeze wq, remove from list after
3043         * flushing is complete in case freeze races us.
3044         */
3045        spin_lock(&workqueue_lock);
3046        list_del(&wq->list);
3047        spin_unlock(&workqueue_lock);
3048
3049        /* sanity check */
3050        for_each_cwq_cpu(cpu, wq) {
3051                struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3052                int i;
3053
3054                for (i = 0; i < WORK_NR_COLORS; i++)
3055                        BUG_ON(cwq->nr_in_flight[i]);
3056                BUG_ON(cwq->nr_active);
3057                BUG_ON(!list_empty(&cwq->delayed_works));
3058        }
3059
3060        if (wq->flags & WQ_RESCUER) {
3061                kthread_stop(wq->rescuer->task);
3062                free_mayday_mask(wq->mayday_mask);
3063                kfree(wq->rescuer);
3064        }
3065
3066        free_cwqs(wq);
3067        kfree(wq);
3068}
3069EXPORT_SYMBOL_GPL(destroy_workqueue);
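/*
 * Illustrative sketch (not part of the original file): a typical lifecycle
 * around destroy_workqueue().  Because WQ_DYING only permits chain queueing,
 * a work item that keeps requeueing itself must be stopped before the call,
 * otherwise the repeated flushing above keeps looping and eventually warns.
 * "cleanup_wq", cleanup_fn() and cleanup_work are hypothetical names.
 *
 *        static void cleanup_fn(struct work_struct *work);
 *        static DECLARE_WORK(cleanup_work, cleanup_fn);
 *        static struct workqueue_struct *cleanup_wq;
 *
 *        cleanup_wq = alloc_workqueue("cleanup", 0, 0);
 *        queue_work(cleanup_wq, &cleanup_work);
 *        ...
 *        cancel_work_sync(&cleanup_work);        stop any self-requeueing first
 *        destroy_workqueue(cleanup_wq);          flushes and frees the rest
 */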
3070
3071/**
3072 * workqueue_set_max_active - adjust max_active of a workqueue
3073 * @wq: target workqueue
3074 * @max_active: new max_active value.
3075 *
3076 * Set max_active of @wq to @max_active.
3077 *
3078 * CONTEXT:
3079 * Don't call from IRQ context.
3080 */
3081void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
3082{
3083        unsigned int cpu;
3084
3085        max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
3086
3087        spin_lock(&workqueue_lock);
3088
3089        wq->saved_max_active = max_active;
3090
3091        for_each_cwq_cpu(cpu, wq) {
3092                struct global_cwq *gcwq = get_gcwq(cpu);
3093
3094                spin_lock_irq(&gcwq->lock);
3095
3096                if (!(wq->flags & WQ_FREEZABLE) ||
3097                    !(gcwq->flags & GCWQ_FREEZING))
3098                        get_cwq(gcwq->cpu, wq)->max_active = max_active;
3099
3100                spin_unlock_irq(&gcwq->lock);
3101        }
3102
3103        spin_unlock(&workqueue_lock);
3104}
3105EXPORT_SYMBOL_GPL(workqueue_set_max_active);
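/*
 * Illustrative sketch (not part of the original file): adjusting the
 * concurrency limit of a live workqueue.  As the code above shows, the value
 * is clamped by wq_clamp_max_active() and, for WQ_FREEZABLE workqueues whose
 * gcwq is currently frozen, only recorded in saved_max_active to be applied
 * on thaw.  "scan_wq" and the value 16 are hypothetical.
 *
 *        workqueue_set_max_active(scan_wq, 16);
 */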
3106
3107/**
3108 * workqueue_congested - test whether a workqueue is congested
3109 * @cpu: CPU in question
3110 * @wq: target workqueue
3111 *
3112 * Test whether @wq's cpu workqueue for @cpu is congested.  There is
3113 * no synchronization around this function and the test result is
3114 * unreliable and only useful as advisory hints or for debugging.
3115 *
3116 * RETURNS:
3117 * %true if congested, %false otherwise.
3118 */
3119bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
3120{
3121        struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3122
3123        return !list_empty(&cwq->delayed_works);
3124}
3125EXPORT_SYMBOL_GPL(workqueue_congested);
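/*
 * Illustrative sketch (not part of the original file): using the advisory
 * congestion test to shed optional work.  The result may already be stale
 * when acted upon, so it should only steer heuristics.  "stat_wq" and
 * stat_work are hypothetical.
 *
 *        unsigned int cpu = get_cpu();
 *
 *        if (!workqueue_congested(cpu, stat_wq))
 *                queue_work_on(cpu, stat_wq, &stat_work);
 *        put_cpu();
 */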
3126
3127/**
3128 * work_cpu - return the last known associated cpu for @work
3129 * @work: the work of interest
3130 *
3131 * RETURNS:
3132 * CPU number if @work was ever queued.  WORK_CPU_NONE otherwise.
3133 */
3134unsigned int work_cpu(struct work_struct *work)
3135{
3136        struct global_cwq *gcwq = get_work_gcwq(work);
3137
3138        return gcwq ? gcwq->cpu : WORK_CPU_NONE;
3139}
3140EXPORT_SYMBOL_GPL(work_cpu);
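/*
 * Illustrative sketch (not part of the original file): work_cpu() only
 * reports the gcwq the work item was last associated with, so treat the
 * answer as a debugging hint.  "my_work" is hypothetical.
 *
 *        unsigned int cpu = work_cpu(&my_work);
 *
 *        if (cpu != WORK_CPU_NONE)
 *                pr_debug("my_work last seen on cpu %u\n", cpu);
 */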
3141
3142/**
3143 * work_busy - test whether a work is currently pending or running
3144 * @work: the work to be tested
3145 *
3146 * Test whether @work is currently pending or running.  There is no
3147 * synchronization around this function and the test result is
3148 * unreliable and only useful as advisory hints or for debugging.
3149 * Especially for reentrant wqs, the pending state might hide the
3150 * running state.
3151 *
3152 * RETURNS:
3153 * OR'd bitmask of WORK_BUSY_* bits.
3154 */
3155unsigned int work_busy(struct work_struct *work)
3156{
3157        struct global_cwq *gcwq = get_work_gcwq(work);
3158        unsigned long flags;
3159        unsigned int ret = 0;
3160
3161        if (!gcwq)
3162                return 0;
3163
3164        spin_lock_irqsave(&gcwq->lock, flags);
3165
3166        if (work_pending(work))
3167                ret |= WORK_BUSY_PENDING;
3168        if (find_worker_executing_work(gcwq, work))
3169                ret |= WORK_BUSY_RUNNING;
3170
3171        spin_unlock_irqrestore(&gcwq->lock, flags);
3172
3173        return ret;
3174}
3175EXPORT_SYMBOL_GPL(work_busy);
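/*
 * Illustrative sketch (not part of the original file): interpreting the
 * bitmask returned by work_busy().  The state can change as soon as
 * gcwq->lock is dropped, so this is again only advisory.  "my_work" is
 * hypothetical.
 *
 *        unsigned int busy = work_busy(&my_work);
 *
 *        if (busy & WORK_BUSY_PENDING)
 *                pr_debug("my_work is queued but not yet running\n");
 *        if (busy & WORK_BUSY_RUNNING)
 *                pr_debug("my_work is currently executing\n");
 */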
3176
3177/*
3178 * CPU hotplug.
3179 *
3180 * There are two challenges in supporting CPU hotplug.  Firstly, there
3181 * are a lot of assumptions on strong associations among work, cwq and
3182 * gcwq which make migrating pending and scheduled works very
3183 * difficult to implement without impacting hot paths.  Secondly,
3184 * gcwqs serve a mix of short, long and very long running works, making
3185 * blocked draining impractical.
3186 *
3187 * This is solved by allowing a gcwq to be detached from CPU, running
3188 * it with unbound (rogue) workers and allowing it to be reattached
3189 * later if the cpu comes back online.  A separate thread is created
3190 * to govern a gcwq in such state and is called the trustee of the
3191 * gcwq.
3192 *
3193 * Trustee states and their descriptions.
3194 *
3195 * START        Command state used on startup.  On CPU_DOWN_PREPARE, a
3196 *              new trustee is started with this state.
3197 *
3198 * IN_CHARGE    Once started, trustee will enter this state after
3199 *              assuming the manager role and making all existing
3200 *              workers rogue.  DOWN_PREPARE waits for trustee to
3201 *              enter this state.  After reaching IN_CHARGE, trustee
3202 *              tries to execute the pending worklist until it's empty
3203 *              and the state is set to BUTCHER, or the state is set
3204 *              to RELEASE.
3205 *
3206 * BUTCHER      Command state which is set by the cpu callback after
3207 *              the cpu has gone down.  Once this state is set, the
3208 *              trustee knows that there will be no new works on the worklist
3209 *              and once the worklist is empty it can proceed to
3210 *              killing idle workers.
3211 *
3212 * RELEASE      Command state which is set by the cpu callback if the
3213 *              cpu down has been canceled or it has come online
3214 *              again.  After recognizing this state, trustee stops
3215 *              trying to drain or butcher and clears ROGUE, rebinds
3216 *              all remaining workers back to the cpu and releases
3217 *              manager role.
3218 *
3219 * DONE         Trustee will enter this state after BUTCHER or RELEASE
3220 *              is complete.
3221 *
3222 *          trustee                 CPU                draining
3223 *         took over                down               complete
3224 * START -----------> IN_CHARGE -----------> BUTCHER -----------> DONE
3225 *                        |                     |                  ^
3226 *                        | CPU is back online  v   return workers |
3227 *                         ----------------> RELEASE --------------
3228 */
3229
3230/**
3231 * trustee_wait_event_timeout - timed event wait for trustee
3232 * @cond: condition to wait for
3233 * @timeout: timeout in jiffies
3234 *
3235 * wait_event_timeout() for trustee to use.  Handles locking and
3236 * checks for RELEASE request.
3237 *
3238 * CONTEXT:
3239 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3240 * multiple times.  To be used by trustee.
3241 *
3242 * RETURNS:
3243 * Positive value indicating the time left if @cond is satisfied, 0 if timed
3244 * out, -1 if canceled.
3245 */
3246#define trustee_wait_event_timeout(cond, timeout) ({                    \
3247        long __ret = (timeout);                                         \
3248        while (!((cond) || (gcwq->trustee_state == TRUSTEE_RELEASE)) && \
3249               __ret) {                                                 \
3250                spin_unlock_irq(&gcwq->lock);                           \
3251                __wait_event_timeout(gcwq->trustee_wait, (cond) ||      \
3252                        (gcwq->trustee_state == TRUSTEE_RELEASE),       \
3253                        __ret);                                         \
3254                spin_lock_irq(&gcwq->lock);                             \
3255        }                                                               \
3256        gcwq->trustee_state == TRUSTEE_RELEASE ? -1 : (__ret);          \
3257})
3258
3259/**
3260 * trustee_wait_event - event wait for trustee
3261 * @cond: condition to wait for
3262 *
3263 * wait_event() for trustee to use.  Automatically handles locking and
3264 * checks for RELEASE request.
3265 *
3266 * CONTEXT:
3267 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3268 * multiple times.  To be used by trustee.
3269 *
3270 * RETURNS:
3271 * 0 if @cond is satisfied, -1 if canceled.
3272 */
3273#define trustee_wait_event(cond) ({                                     \
3274        long __ret1;                                                    \
3275        __ret1 = trustee_wait_event_timeout(cond, MAX_SCHEDULE_TIMEOUT);\
3276        __ret1 < 0 ? -1 : 0;                                            \
3277})
3278
3279static int __cpuinit trustee_thread(void *__gcwq)
3280{
3281        struct global_cwq *gcwq = __gcwq;
3282        struct worker *worker;
3283        struct work_struct *work;
3284        struct hlist_node *pos;
3285        long rc;
3286        int i;
3287
3288        BUG_ON(gcwq->cpu != smp_processor_id());
3289
3290        spin_lock_irq(&gcwq->lock);
3291        /*
3292         * Claim the manager position and make all workers rogue.
3293         * Trustee must be bound to the target cpu and can't be
3294         * cancelled.
3295         */
3296        BUG_ON(gcwq->cpu != smp_processor_id());
3297        rc = trustee_wait_event(!(gcwq->flags & GCWQ_MANAGING_WORKERS));
3298        BUG_ON(rc < 0);
3299
3300        gcwq->flags |= GCWQ_MANAGING_WORKERS;
3301
3302        list_for_each_entry(worker, &gcwq->idle_list, entry)
3303                worker->flags |= WORKER_ROGUE;
3304
3305        for_each_busy_worker(worker, i, pos, gcwq)
3306                worker->flags |= WORKER_ROGUE;
3307
3308        /*
3309         * Call schedule() so that we cross rq->lock and thus can
3310         * guarantee sched callbacks see the rogue flag.  This is
3311         * necessary as scheduler callbacks may be invoked from other
3312         * cpus.
3313         */
3314        spin_unlock_irq(&gcwq->lock);
3315        schedule();
3316        spin_lock_irq(&gcwq->lock);
3317
3318        /*
3319         * Sched callbacks are disabled now.  Zap nr_running.  After
3320         * this, nr_running stays zero and need_more_worker() and
3321         * keep_working() are always true as long as the worklist is
3322         * not empty.
3323         */
3324        atomic_set(get_gcwq_nr_running(gcwq->cpu), 0);
3325
3326        spin_unlock_irq(&gcwq->lock);
3327        del_timer_sync(&gcwq->idle_timer);
3328        spin_lock_irq(&gcwq->lock);
3329
3330        /*
3331         * We're now in charge.  Notify and proceed to drain.  We need
3332         * to keep the gcwq running during the whole CPU down
3333         * procedure as other cpu hotunplug callbacks may need to
3334         * flush currently running tasks.
3335         */
3336        gcwq->trustee_state = TRUSTEE_IN_CHARGE;
3337        wake_up_all(&gcwq->trustee_wait);
3338
3339        /*
3340         * The original cpu is in the process of dying and may go away
3341         * anytime now.  When that happens, we and all workers would
3342         * be migrated to other cpus.  Try draining any left work.  We
3343         * want to get it over with ASAP - spam rescuers, wake up as
3344         * many idlers as necessary and create new ones till the
3345         * worklist is empty.  Note that if the gcwq is frozen, there
3346         * may be frozen works in freezable cwqs.  Don't declare
3347         * completion while frozen.
3348         */
3349        while (gcwq->nr_workers != gcwq->nr_idle ||
3350               gcwq->flags & GCWQ_FREEZING ||
3351               gcwq->trustee_state == TRUSTEE_IN_CHARGE) {
3352                int nr_works = 0;
3353
3354                list_for_each_entry(work, &gcwq->worklist, entry) {
3355                        send_mayday(work);
3356                        nr_works++;
3357                }
3358
3359                list_for_each_entry(worker, &gcwq->idle_list, entry) {
3360                        if (!nr_works--)
3361                                break;
3362                        wake_up_process(worker->task);
3363                }
3364
3365                if (need_to_create_worker(gcwq)) {
3366                        spin_unlock_irq(&gcwq->lock);
3367                        worker = create_worker(gcwq, false);
3368                        spin_lock_irq(&gcwq->lock);
3369                        if (worker) {
3370                                worker->flags |= WORKER_ROGUE;
3371                                start_worker(worker);
3372                        }
3373                }
3374
3375                /* give a breather */
3376                if (trustee_wait_event_timeout(false, TRUSTEE_COOLDOWN) < 0)
3377                        break;
3378        }
3379
3380        /*
3381         * Either all works have been scheduled and cpu is down, or
3382         * cpu down has already been canceled.  Wait for and butcher
3383         * all workers till we're canceled.
3384         */
3385        do {
3386                rc = trustee_wait_event(!list_empty(&gcwq->idle_list));
3387                while (!list_empty(&gcwq->idle_list))
3388                        destroy_worker(list_first_entry(&gcwq->idle_list,
3389                                                        struct worker, entry));
3390        } while (gcwq->nr_workers && rc >= 0);
3391
3392        /*
3393         * At this point, either draining has completed and no worker
3394         * is left, or cpu down has been canceled or the cpu is being
3395         * brought back up.  There shouldn't be any idle one left.
3396         * Tell each remaining busy one to rebind once it finishes its
3397         * currently scheduled works by scheduling the rebind_work.
3398         */
3399        WARN_ON(!list_empty(&gcwq->idle_list));
3400
3401        for_each_busy_worker(worker, i, pos, gcwq) {
3402                struct work_struct *rebind_work = &worker->rebind_work;
3403
3404                /*
3405                 * Rebind_work may race with future cpu hotplug
3406                 * operations.  Use a separate flag to mark that
3407                 * rebinding is scheduled.
3408                 */
3409                worker->flags |= WORKER_REBIND;
3410                worker->flags &= ~WORKER_ROGUE;
3411
3412                /* queue rebind_work, wq doesn't matter, use the default one */
3413                if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
3414                                     work_data_bits(rebind_work)))
3415                        continue;
3416
3417                debug_work_activate(rebind_work);
3418                insert_work(get_cwq(gcwq->cpu, system_wq), rebind_work,
3419                            worker->scheduled.next,
3420                            work_color_to_flags(WORK_NO_COLOR));
3421        }
3422
3423        /* relinquish manager role */
3424        gcwq->flags &= ~GCWQ_MANAGING_WORKERS;
3425
3426        /* notify completion */
3427        gcwq->trustee = NULL;
3428        gcwq->trustee_state = TRUSTEE_DONE;
3429        wake_up_all(&gcwq->trustee_wait);
3430        spin_unlock_irq(&gcwq->lock);
3431        return 0;
3432}
3433
3434/**
3435 * wait_trustee_state - wait for trustee to enter the specified state
3436 * @gcwq: gcwq the trustee of interest belongs to
3437 * @state: target state to wait for
3438 *
3439 * Wait for the trustee to reach @state.  DONE is already matched.
3440 *
3441 * CONTEXT:
3442 * spin_lock_irq(gcwq->lock) which may be released and regrabbed
3443 * multiple times.  To be used by cpu_callback.
3444 */
3445static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
3446__releases(&gcwq->lock)
3447__acquires(&gcwq->lock)
3448{
3449        if (!(gcwq->trustee_state == state ||
3450              gcwq->trustee_state == TRUSTEE_DONE)) {
3451                spin_unlock_irq(&gcwq->lock);
3452                __wait_event(gcwq->trustee_wait,
3453                             gcwq->trustee_state == state ||
3454                             gcwq->trustee_state == TRUSTEE_DONE);
3455                spin_lock_irq(&gcwq->lock);
3456        }
3457}
3458
3459static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
3460                                                unsigned long action,
3461                                                void *hcpu)
3462{
3463        unsigned int cpu = (unsigned long)hcpu;
3464        struct global_cwq *gcwq = get_gcwq(cpu);
3465        struct task_struct *new_trustee = NULL;
3466        struct worker *uninitialized_var(new_worker);
3467        unsigned long flags;
3468
3469        action &= ~CPU_TASKS_FROZEN;
3470
3471        switch (action) {
3472        case CPU_DOWN_PREPARE:
3473                new_trustee = kthread_create(trustee_thread, gcwq,
3474                                             "workqueue_trustee/%d", cpu);
3475                if (IS_ERR(new_trustee))
3476                        return notifier_from_errno(PTR_ERR(new_trustee));
3477                kthread_bind(new_trustee, cpu);
3478                /* fall through */
3479        case CPU_UP_PREPARE:
3480                BUG_ON(gcwq->first_idle);
3481                new_worker = create_worker(gcwq, false);
3482                if (!new_worker) {
3483                        if (new_trustee)
3484                                kthread_stop(new_trustee);
3485                        return NOTIFY_BAD;
3486                }
3487        }
3488
3489        /* some are called w/ irq disabled, don't disturb irq status */
3490        spin_lock_irqsave(&gcwq->lock, flags);
3491
3492        switch (action) {
3493        case CPU_DOWN_PREPARE:
3494                /* initialize trustee and tell it to acquire the gcwq */
3495                BUG_ON(gcwq->trustee || gcwq->trustee_state != TRUSTEE_DONE);
3496                gcwq->trustee = new_trustee;
3497                gcwq->trustee_state = TRUSTEE_START;
3498                wake_up_process(gcwq->trustee);
3499                wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
3500                /* fall through */
3501        case CPU_UP_PREPARE:
3502                BUG_ON(gcwq->first_idle);
3503                gcwq->first_idle = new_worker;
3504                break;
3505
3506        case CPU_DYING:
3507                /*
3508                 * Before this, the trustee and all workers except for
3509                 * the ones which are still executing works from
3510                 * before the last CPU down must be on the cpu.  After
3511                 * this, they'll all be dispersed to other cpus.
3512                 */
3513                gcwq->flags |= GCWQ_DISASSOCIATED;
3514                break;
3515
3516        case CPU_POST_DEAD:
3517                gcwq->trustee_state = TRUSTEE_BUTCHER;
3518                /* fall through */
3519        case CPU_UP_CANCELED:
3520                destroy_worker(gcwq->first_idle);
3521                gcwq->first_idle = NULL;
3522                break;
3523
3524        case CPU_DOWN_FAILED:
3525        case CPU_ONLINE:
3526                gcwq->flags &= ~GCWQ_DISASSOCIATED;
3527                if (gcwq->trustee_state != TRUSTEE_DONE) {
3528                        gcwq->trustee_state = TRUSTEE_RELEASE;
3529                        wake_up_process(gcwq->trustee);
3530                        wait_trustee_state(gcwq, TRUSTEE_DONE);
3531                }
3532
3533                /*
3534                 * Trustee is done and there might be no worker left.
3535                 * Put the first_idle in and request a real manager to
3536                 * take a look.
3537                 */
3538                spin_unlock_irq(&gcwq->lock);
3539                kthread_bind(gcwq->first_idle->task, cpu);
3540                spin_lock_irq(&gcwq->lock);
3541                gcwq->flags |= GCWQ_MANAGE_WORKERS;
3542                start_worker(gcwq->first_idle);
3543                gcwq->first_idle = NULL;
3544                break;
3545        }
3546
3547        spin_unlock_irqrestore(&gcwq->lock, flags);
3548
3549        return notifier_from_errno(0);
3550}
3551
3552#ifdef CONFIG_SMP
3553
3554struct work_for_cpu {
3555        struct completion completion;
3556        long (*fn)(void *);
3557        void *arg;
3558        long ret;
3559};
3560
3561static int do_work_for_cpu(void *_wfc)
3562{
3563        struct work_for_cpu *wfc = _wfc;
3564        wfc->ret = wfc->fn(wfc->arg);
3565        complete(&wfc->completion);
3566        return 0;
3567}
3568
3569/**
3570 * work_on_cpu - run a function in user context on a particular cpu
3571 * @cpu: the cpu to run on
3572 * @fn: the function to run
3573 * @arg: the function arg
3574 *
3575 * This will return the value @fn returns.
3576 * It is up to the caller to ensure that the cpu doesn't go offline.
3577 * The caller must not hold any locks which would prevent @fn from completing.
3578 */
3579long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
3580{
3581        struct task_struct *sub_thread;
3582        struct work_for_cpu wfc = {
3583                .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
3584                .fn = fn,
3585                .arg = arg,
3586        };
3587
3588        sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
3589        if (IS_ERR(sub_thread))
3590                return PTR_ERR(sub_thread);
3591        kthread_bind(sub_thread, cpu);
3592        wake_up_process(sub_thread);
3593        wait_for_completion(&wfc.completion);
3594        return wfc.ret;
3595}
3596EXPORT_SYMBOL_GPL(work_on_cpu);
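/*
 * Illustrative sketch (not part of the original file): running a function
 * on a specific cpu and collecting its return value.  The caller must keep
 * the cpu online, e.g. by wrapping the call in get_online_cpus() and
 * put_online_cpus().  probe_fn() and probe_arg are hypothetical.
 *
 *        static long probe_fn(void *arg)
 *        {
 *                return raw_smp_processor_id();
 *        }
 *
 *        get_online_cpus();
 *        ret = work_on_cpu(2, probe_fn, &probe_arg);
 *        put_online_cpus();
 */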
3597#endif /* CONFIG_SMP */
3598
3599#ifdef CONFIG_FREEZER
3600
3601/**
3602 * freeze_workqueues_begin - begin freezing workqueues
3603 *
3604 * Start freezing workqueues.  After this function returns, all freezable
3605 * workqueues will queue new works to their delayed_works list instead of
3606 * gcwq->worklist.
3607 *
3608 * CONTEXT:
3609 * Grabs and releases workqueue_lock and gcwq->lock's.
3610 */
3611void freeze_workqueues_begin(void)
3612{
3613        unsigned int cpu;
3614
3615        spin_lock(&workqueue_lock);
3616
3617        BUG_ON(workqueue_freezing);
3618        workqueue_freezing = true;
3619
3620        for_each_gcwq_cpu(cpu) {
3621                struct global_cwq *gcwq = get_gcwq(cpu);
3622                struct workqueue_struct *wq;
3623
3624                spin_lock_irq(&gcwq->lock);
3625
3626                BUG_ON(gcwq->flags & GCWQ_FREEZING);
3627                gcwq->flags |= GCWQ_FREEZING;
3628
3629                list_for_each_entry(wq, &workqueues, list) {
3630                        struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3631
3632                        if (cwq && wq->flags & WQ_FREEZABLE)
3633                                cwq->max_active = 0;
3634                }
3635
3636                spin_unlock_irq(&gcwq->lock);
3637        }
3638
3639        spin_unlock(&workqueue_lock);
3640}
3641
3642/**
3643 * freeze_workqueues_busy - are freezable workqueues still busy?
3644 *
3645 * Check whether freezing is complete.  This function must be called
3646 * between freeze_workqueues_begin() and thaw_workqueues().
3647 *
3648 * CONTEXT:
3649 * Grabs and releases workqueue_lock.
3650 *
3651 * RETURNS:
3652 * %true if some freezable workqueues are still busy.  %false if freezing
3653 * is complete.
3654 */
3655bool freeze_workqueues_busy(void)
3656{
3657        unsigned int cpu;
3658        bool busy = false;
3659
3660        spin_lock(&workqueue_lock);
3661
3662        BUG_ON(!workqueue_freezing);
3663
3664        for_each_gcwq_cpu(cpu) {
3665                struct workqueue_struct *wq;
3666                /*
3667                 * nr_active is monotonically decreasing.  It's safe
3668                 * to peek without lock.
3669                 */
3670                list_for_each_entry(wq, &workqueues, list) {
3671                        struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3672
3673                        if (!cwq || !(wq->flags & WQ_FREEZABLE))
3674                                continue;
3675
3676                        BUG_ON(cwq->nr_active < 0);
3677                        if (cwq->nr_active) {
3678                                busy = true;
3679                                goto out_unlock;
3680                        }
3681                }
3682        }
3683out_unlock:
3684        spin_unlock(&workqueue_lock);
3685        return busy;
3686}
3687
3688/**
3689 * thaw_workqueues - thaw workqueues
3690 *
3691 * Thaw workqueues.  Normal queueing is restored and all collected
3692 * frozen works are transferred to their respective gcwq worklists.
3693 *
3694 * CONTEXT:
3695 * Grabs and releases workqueue_lock and gcwq->lock's.
3696 */
3697void thaw_workqueues(void)
3698{
3699        unsigned int cpu;
3700
3701        spin_lock(&workqueue_lock);
3702
3703        if (!workqueue_freezing)
3704                goto out_unlock;
3705
3706        for_each_gcwq_cpu(cpu) {
3707                struct global_cwq *gcwq = get_gcwq(cpu);
3708                struct workqueue_struct *wq;
3709
3710                spin_lock_irq(&gcwq->lock);
3711
3712                BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
3713                gcwq->flags &= ~GCWQ_FREEZING;
3714
3715                list_for_each_entry(wq, &workqueues, list) {
3716                        struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3717
3718                        if (!cwq || !(wq->flags & WQ_FREEZABLE))
3719                                continue;
3720
3721                        /* restore max_active and repopulate worklist */
3722                        cwq->max_active = wq->saved_max_active;
3723
3724                        while (!list_empty(&cwq->delayed_works) &&
3725                               cwq->nr_active < cwq->max_active)
3726                                cwq_activate_first_delayed(cwq);
3727                }
3728
3729                wake_up_worker(gcwq);
3730
3731                spin_unlock_irq(&gcwq->lock);
3732        }
3733
3734        workqueue_freezing = false;
3735out_unlock:
3736        spin_unlock(&workqueue_lock);
3737}
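/*
 * Illustrative sketch (not part of the original file): the sequence the
 * suspend/hibernation freezer is expected to drive through the three hooks
 * above.  The polling loop and msleep() interval are simplified
 * placeholders, not the actual freezer implementation.
 *
 *        freeze_workqueues_begin();
 *
 *        while (freeze_workqueues_busy())
 *                msleep(10);             wait for in-flight works to drain
 *
 *        ... system image is created / devices are suspended ...
 *
 *        thaw_workqueues();
 */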
3738#endif /* CONFIG_FREEZER */
3739
3740static int __init init_workqueues(void)
3741{
3742        unsigned int cpu;
3743        int i;
3744
3745        cpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);
3746
3747        /* initialize gcwqs */
3748        for_each_gcwq_cpu(cpu) {
3749                struct global_cwq *gcwq = get_gcwq(cpu);
3750
3751                spin_lock_init(&gcwq->lock);
3752                INIT_LIST_HEAD(&gcwq->worklist);
3753                gcwq->cpu = cpu;
3754                gcwq->flags |= GCWQ_DISASSOCIATED;
3755
3756                INIT_LIST_HEAD(&gcwq->idle_list);
3757                for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
3758                        INIT_HLIST_HEAD(&gcwq->busy_hash[i]);
3759
3760                init_timer_deferrable(&gcwq->idle_timer);
3761                gcwq->idle_timer.function = idle_worker_timeout;
3762                gcwq->idle_timer.data = (unsigned long)gcwq;
3763
3764                setup_timer(&gcwq->mayday_timer, gcwq_mayday_timeout,
3765                            (unsigned long)gcwq);
3766
3767                ida_init(&gcwq->worker_ida);
3768
3769                gcwq->trustee_state = TRUSTEE_DONE;
3770                init_waitqueue_head(&gcwq->trustee_wait);
3771        }
3772
3773        /* create the initial worker */
3774        for_each_online_gcwq_cpu(cpu) {
3775                struct global_cwq *gcwq = get_gcwq(cpu);
3776                struct worker *worker;
3777
3778                if (cpu != WORK_CPU_UNBOUND)
3779                        gcwq->flags &= ~GCWQ_DISASSOCIATED;
3780                worker = create_worker(gcwq, true);
3781                BUG_ON(!worker);
3782                spin_lock_irq(&gcwq->lock);
3783                start_worker(worker);
3784                spin_unlock_irq(&gcwq->lock);
3785        }
3786
3787        system_wq = alloc_workqueue("events", 0, 0);
3788        system_long_wq = alloc_workqueue("events_long", 0, 0);
3789        system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
3790        system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
3791                                            WQ_UNBOUND_MAX_ACTIVE);
3792        system_freezable_wq = alloc_workqueue("events_freezable",
3793                                              WQ_FREEZABLE, 0);
3794        BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq ||
3795               !system_unbound_wq || !system_freezable_wq);
3796        return 0;
3797}
3798early_initcall(init_workqueues);
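/*
 * Illustrative sketch (not part of the original file): once
 * init_workqueues() has run, the system workqueues created above are
 * available to all later code.  schedule_work() targets system_wq, while
 * queueing on system_unbound_wq bypasses per-cpu concurrency management as
 * described at the top of this file.  my_fn() and my_work are hypothetical,
 * and a given work item would be queued on one workqueue or the other, not
 * both.
 *
 *        static void my_fn(struct work_struct *work);
 *        static DECLARE_WORK(my_work, my_fn);
 *
 *        schedule_work(&my_work);                        on system_wq
 *        queue_work(system_unbound_wq, &my_work);        on the unbound pool
 */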
3799