linux/kernel/kthread.c
/* Kernel thread helper functions.
 *   Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
        /* Information passed to kthread() from kthreadd. */
        int (*threadfn)(void *data);
        void *data;

        /* Result passed back to kthread_create() from kthreadd. */
        struct task_struct *result;
        struct completion done;

        struct list_head list;
};

struct kthread {
        int should_stop;
        void *data;
        struct completion exited;
};

#define to_kthread(tsk) \
        container_of((tsk)->vfork_done, struct kthread, exited)

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
int kthread_should_stop(void)
{
        return to_kthread(current)->should_stop;
}
EXPORT_SYMBOL(kthread_should_stop);
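
/*
 * Usage sketch (editor's illustration, not part of the original file):
 * the typical shape of a threadfn built around kthread_should_stop().
 * my_threadfn, my_work_pending() and my_do_work() are hypothetical.
 * Re-checking kthread_should_stop() after setting TASK_INTERRUPTIBLE
 * closes the race with the wakeup issued by kthread_stop().
 *
 *        static int my_threadfn(void *data)
 *        {
 *                while (!kthread_should_stop()) {
 *                        set_current_state(TASK_INTERRUPTIBLE);
 *                        if (!my_work_pending() && !kthread_should_stop())
 *                                schedule();
 *                        __set_current_state(TASK_RUNNING);
 *                        while (my_work_pending())
 *                                my_do_work();
 *                }
 *                return 0;
 *        }
 */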

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
        return to_kthread(task)->data;
}
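
/*
 * Usage sketch (editor's illustration): recover the creation-time @data
 * from outside the thread itself.  struct my_dev and my_task are
 * hypothetical; per the comment above, the caller must guarantee that
 * my_task is still a live kthread.
 *
 *        struct my_dev *dev = kthread_data(my_task);
 */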

static int kthread(void *_create)
{
        /* Copy data: it's on kthread_create()'s stack, which may go away */
        struct kthread_create_info *create = _create;
        int (*threadfn)(void *data) = create->threadfn;
        void *data = create->data;
        struct kthread self;
        int ret;

        self.should_stop = 0;
        self.data = data;
        init_completion(&self.exited);
        current->vfork_done = &self.exited;

        /* OK, tell user we're spawned, wait for stop or wakeup */
        __set_current_state(TASK_UNINTERRUPTIBLE);
        create->result = current;
        complete(&create->done);
        schedule();

        ret = -EINTR;
        if (!self.should_stop)
                ret = threadfn(data);

        /* we can't just return, we must preserve "self" on stack */
        do_exit(ret);
}

static void create_kthread(struct kthread_create_info *create)
{
        int pid;

        /* We want our own signal handler (we take no signals by default). */
        pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
        if (pid < 0) {
                create->result = ERR_PTR(pid);
                complete(&create->done);
        }
}

/**
 * kthread_create - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().
 *
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM).
 */
struct task_struct *kthread_create(int (*threadfn)(void *data),
                                   void *data,
                                   const char namefmt[],
                                   ...)
{
        struct kthread_create_info create;

        create.threadfn = threadfn;
        create.data = data;
        init_completion(&create.done);

        spin_lock(&kthread_create_lock);
        list_add_tail(&create.list, &kthread_create_list);
        spin_unlock(&kthread_create_lock);

        wake_up_process(kthreadd_task);
        wait_for_completion(&create.done);

        if (!IS_ERR(create.result)) {
                static const struct sched_param param = { .sched_priority = 0 };
                va_list args;

                va_start(args, namefmt);
                vsnprintf(create.result->comm, sizeof(create.result->comm),
                          namefmt, args);
                va_end(args);
                /*
                 * root may have changed our (kthreadd's) priority or CPU mask.
                 * The kernel thread should not inherit these properties.
                 */
                sched_setscheduler_nocheck(create.result, SCHED_NORMAL, &param);
                set_cpus_allowed_ptr(create.result, cpu_all_mask);
        }
        return create.result;
}
EXPORT_SYMBOL(kthread_create);
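
/*
 * Usage sketch (editor's illustration): create a named, initially
 * stopped thread and start it explicitly.  my_threadfn, my_data and id
 * are hypothetical; the kthread_run() macro in <linux/kthread.h> wraps
 * exactly this create-then-wake sequence.
 *
 *        struct task_struct *t;
 *
 *        t = kthread_create(my_threadfn, my_data, "mydrv/%d", id);
 *        if (IS_ERR(t))
 *                return PTR_ERR(t);
 *        wake_up_process(t);
 */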

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @p to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
        /* Must have done schedule() in kthread() before we set_task_cpu */
        if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
                WARN_ON(1);
                return;
        }

        p->cpus_allowed = cpumask_of_cpu(cpu);
        p->rt.nr_cpus_allowed = 1;
        p->flags |= PF_THREAD_BOUND;
}
EXPORT_SYMBOL(kthread_bind);
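
/*
 * Usage sketch (editor's illustration): a per-cpu thread must be bound
 * while it is still dormant, i.e. after kthread_create() but before
 * wake_up_process().  my_threadfn and cpu are hypothetical.
 *
 *        t = kthread_create(my_threadfn, NULL, "mydrv/%u", cpu);
 *        if (!IS_ERR(t)) {
 *                kthread_bind(t, cpu);
 *                wake_up_process(t);
 *        }
 */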

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
        struct kthread *kthread;
        int ret;

        trace_sched_kthread_stop(k);
        get_task_struct(k);

        kthread = to_kthread(k);
        barrier(); /* it might have exited */
        if (k->vfork_done != NULL) {
                kthread->should_stop = 1;
                wake_up_process(k);
                wait_for_completion(&kthread->exited);
        }
        ret = k->exit_code;

        put_task_struct(k);
        trace_sched_kthread_stop_ret(ret);

        return ret;
}
EXPORT_SYMBOL(kthread_stop);
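
/*
 * Usage sketch (editor's illustration): tear down a thread created with
 * kthread_create(), where t is the task_struct returned by that call.
 * err ends up holding my_threadfn()'s return value, or -EINTR if the
 * thread was never woken.
 *
 *        err = kthread_stop(t);
 */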

int kthreadd(void *unused)
{
        struct task_struct *tsk = current;

        /* Setup a clean context for our children to inherit. */
        set_task_comm(tsk, "kthreadd");
        ignore_signals(tsk);
        set_cpus_allowed_ptr(tsk, cpu_all_mask);
        set_mems_allowed(node_states[N_HIGH_MEMORY]);

        current->flags |= PF_NOFREEZE | PF_FREEZER_NOSIG;

        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (list_empty(&kthread_create_list))
                        schedule();
                __set_current_state(TASK_RUNNING);

                spin_lock(&kthread_create_lock);
                while (!list_empty(&kthread_create_list)) {
                        struct kthread_create_info *create;

                        create = list_entry(kthread_create_list.next,
                                            struct kthread_create_info, list);
                        list_del_init(&create->list);
                        spin_unlock(&kthread_create_lock);

                        create_kthread(create);

                        spin_lock(&kthread_create_lock);
                }
                spin_unlock(&kthread_create_lock);
        }

        return 0;
}

void __init_kthread_worker(struct kthread_worker *worker,
                                const char *name,
                                struct lock_class_key *key)
{
        spin_lock_init(&worker->lock);
        lockdep_set_class_and_name(&worker->lock, key, name);
        INIT_LIST_HEAD(&worker->work_list);
        worker->task = NULL;
}
EXPORT_SYMBOL_GPL(__init_kthread_worker);
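
/*
 * Usage sketch (editor's illustration): workers are normally set up via
 * the wrappers in <linux/kthread.h> rather than by calling the function
 * above directly.  my_worker is hypothetical.
 *
 *        DEFINE_KTHREAD_WORKER(my_worker);
 *
 * or, for a worker embedded in another object:
 *
 *        struct kthread_worker my_worker;
 *
 *        init_kthread_worker(&my_worker);
 */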

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function can be used as @threadfn to kthread_create() or
 * kthread_run() with @worker_ptr argument pointing to an initialized
 * kthread_worker.  The started kthread will process work_list until
 * it is stopped with kthread_stop().  A kthread can also call
 * this function directly after extra initialization.
 *
 * Different kthreads can be used for the same kthread_worker as long
 * as there's only one kthread attached to it at any given time.  A
 * kthread_worker without an attached kthread simply collects queued
 * kthread_works.
 */
int kthread_worker_fn(void *worker_ptr)
{
        struct kthread_worker *worker = worker_ptr;
        struct kthread_work *work;

        WARN_ON(worker->task);
        worker->task = current;
repeat:
        set_current_state(TASK_INTERRUPTIBLE);  /* mb paired w/ kthread_stop */

        if (kthread_should_stop()) {
                __set_current_state(TASK_RUNNING);
                spin_lock_irq(&worker->lock);
                worker->task = NULL;
                spin_unlock_irq(&worker->lock);
                return 0;
        }

        work = NULL;
        spin_lock_irq(&worker->lock);
        if (!list_empty(&worker->work_list)) {
                work = list_first_entry(&worker->work_list,
                                        struct kthread_work, node);
                list_del_init(&work->node);
        }
        spin_unlock_irq(&worker->lock);

        if (work) {
                __set_current_state(TASK_RUNNING);
                work->func(work);
                smp_wmb();      /* wmb worker-b0 paired with flush-b1 */
                work->done_seq = work->queue_seq;
                smp_mb();       /* mb worker-b1 paired with flush-b0 */
                if (atomic_read(&work->flushing))
                        wake_up_all(&work->done);
        } else if (!freezing(current))
                schedule();

        try_to_freeze();
        goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);
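
/*
 * Usage sketch (editor's illustration): attach a thread to a worker by
 * pointing kthread_run() at kthread_worker_fn.  my_worker is assumed to
 * have been initialized as in the sketch above.  Stopping the thread
 * with kthread_stop(t) later detaches it; the worker then merely
 * collects queued works until another thread attaches.
 *
 *        struct task_struct *t;
 *
 *        t = kthread_run(kthread_worker_fn, &my_worker, "my_worker");
 *        if (IS_ERR(t))
 *                return PTR_ERR(t);
 */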

/**
 * queue_kthread_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to @worker for async execution.  @worker must have been
 * initialized, e.g. with init_kthread_worker().  Returns %true if
 * @work was successfully queued, %false if it was already pending.
 */
bool queue_kthread_work(struct kthread_worker *worker,
                        struct kthread_work *work)
{
        bool ret = false;
        unsigned long flags;

        spin_lock_irqsave(&worker->lock, flags);
        if (list_empty(&work->node)) {
                list_add_tail(&work->node, &worker->work_list);
                work->queue_seq++;
                if (likely(worker->task))
                        wake_up_process(worker->task);
                ret = true;
        }
        spin_unlock_irqrestore(&worker->lock, flags);
        return ret;
}
EXPORT_SYMBOL_GPL(queue_kthread_work);
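
/*
 * Usage sketch (editor's illustration): define a work item and queue it
 * on the worker from the previous sketch.  my_work_fn and my_worker are
 * hypothetical; DEFINE_KTHREAD_WORK() comes from <linux/kthread.h>.
 *
 *        static void my_work_fn(struct kthread_work *work)
 *        {
 *                ...  runs in the worker's kthread
 *        }
 *
 *        static DEFINE_KTHREAD_WORK(my_work, my_work_fn);
 *
 *        queue_kthread_work(&my_worker, &my_work);
 */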

/**
 * flush_kthread_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void flush_kthread_work(struct kthread_work *work)
{
        int seq = work->queue_seq;

        atomic_inc(&work->flushing);

        /*
         * mb flush-b0 paired with worker-b1, to make sure either
         * worker sees the above increment or we see done_seq update.
         */
        smp_mb__after_atomic_inc();

        /* A - B <= 0 tests whether B is in front of A regardless of overflow */
        wait_event(work->done, seq - work->done_seq <= 0);
        atomic_dec(&work->flushing);

        /*
         * rmb flush-b1 paired with worker-b0, to make sure our caller
         * sees every change made by work->func().
         */
        smp_mb__after_atomic_dec();
}
EXPORT_SYMBOL_GPL(flush_kthread_work);
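
/*
 * Usage sketch (editor's illustration): wait for the hypothetical
 * my_work item queued above to finish before freeing anything it uses.
 *
 *        flush_kthread_work(&my_work);
 */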

struct kthread_flush_work {
        struct kthread_work     work;
        struct completion       done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
        struct kthread_flush_work *fwork =
                container_of(work, struct kthread_flush_work, work);
        complete(&fwork->done);
}

/**
 * flush_kthread_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void flush_kthread_worker(struct kthread_worker *worker)
{
        struct kthread_flush_work fwork = {
                KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
                COMPLETION_INITIALIZER_ONSTACK(fwork.done),
        };

        queue_kthread_work(worker, &fwork.work);
        wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_worker);
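
/*
 * Usage sketch (editor's illustration): on a teardown path, drain
 * everything queued on the hypothetical my_worker, then stop its
 * attached thread t.
 *
 *        flush_kthread_worker(&my_worker);
 *        kthread_stop(t);
 */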