linux/kernel/task_work.c
#include <linux/spinlock.h>
#include <linux/task_work.h>
#include <linux/tracehook.h>

static struct callback_head work_exited; /* all we need is ->next == NULL */

/**
 * task_work_add - ask the @task to execute @work->func()
 * @task: the task which should run the callback
 * @work: the callback to run
 * @notify: send the notification if true
 *
 * Queue @work for task_work_run() below and notify the @task if @notify.
 * Fails if the @task is exiting/exited and thus it can't process this @work.
 * Otherwise @work->func() will be called when the @task returns from kernel
 * mode or exits.
 *
 * This is like a signal handler which runs in kernel mode, but it doesn't
 * try to wake up the @task.
 *
 * Note: there is no ordering guarantee on works queued here.
 *
 * RETURNS:
 * 0 on success, or -ESRCH if the @task is exiting/exited.
 */
int
task_work_add(struct task_struct *task, struct callback_head *work, bool notify)
{
	struct callback_head *head;

	do {
		head = ACCESS_ONCE(task->task_works);
		if (unlikely(head == &work_exited))
			return -ESRCH;
		work->next = head;
	} while (cmpxchg(&task->task_works, head, work) != head);

	if (notify)
		set_notify_resume(task);
	return 0;
}

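/*
 * Illustrative sketch (not part of this file): a typical caller embeds
 * the callback_head in its own object so the callback can recover it
 * with container_of(). The names my_work, my_func and queue_my_work
 * below are hypothetical; only init_task_work() and task_work_add()
 * come from this API.
 */
#if 0	/* example only */
#include <linux/slab.h>		/* kfree() */

struct my_work {			/* hypothetical caller-side type */
	struct callback_head twork;
	int data;
};

static void my_func(struct callback_head *cb)
{
	struct my_work *mw = container_of(cb, struct my_work, twork);

	/* runs in the target task's own context and may sleep */
	kfree(mw);
}

static int queue_my_work(struct task_struct *task, struct my_work *mw)
{
	init_task_work(&mw->twork, my_func);
	/* notify == true: set TIF_NOTIFY_RESUME so @task runs it soon */
	return task_work_add(task, &mw->twork, true);
}
#endif
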
/**
 * task_work_cancel - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @func: identifies the work to remove
 *
 * Find the last queued pending work with ->func == @func and remove
 * it from the queue.
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel(struct task_struct *task, task_work_func_t func)
{
	struct callback_head **pprev = &task->task_works;
	struct callback_head *work;
	unsigned long flags;
	/*
	 * If cmpxchg() fails we continue without updating pprev.
	 * Either we raced with task_work_add() which added a new
	 * entry before this work, and we will find it again, or we
	 * raced with task_work_run() and *pprev is now NULL/exited.
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	while ((work = ACCESS_ONCE(*pprev))) {
		smp_read_barrier_depends();
		if (work->func != func)
			pprev = &work->next;
		else if (cmpxchg(pprev, work, work->next) == work)
			break;
	}
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	return work;
}

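/*
 * Illustrative sketch (not part of this file): once task_work_cancel()
 * returns a work, the caller owns it again and may free or requeue it.
 * my_work/my_func continue the hypothetical example above.
 */
#if 0	/* example only */
static void cancel_my_work(struct task_struct *task)
{
	struct callback_head *cb = task_work_cancel(task, my_func);

	if (cb)
		kfree(container_of(cb, struct my_work, twork));
}
#endif
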
/**
 * task_work_run - execute the works added by task_work_add()
 *
 * Flush the pending works. Should be used by the core kernel code.
 * Called before the task returns to user mode or stops, or when it
 * exits. In the latter case task_work_add() can no longer add new
 * work after task_work_run() returns.
 */
void task_work_run(void)
{
	struct task_struct *task = current;
	struct callback_head *work, *head, *next;

	for (;;) {
		/*
		 * work->func() can do task_work_add(), do not set
		 * work_exited unless the list is empty.
		 */
		do {
			work = ACCESS_ONCE(task->task_works);
			head = !work && (task->flags & PF_EXITING) ?
				&work_exited : NULL;
		} while (cmpxchg(&task->task_works, work, head) != work);

		if (!work)
			break;
		/*
		 * Synchronize with task_work_cancel(). It can't remove
		 * the first entry == work, cmpxchg(task_works) should
		 * fail, but it can play with *work and other entries.
		 */
		raw_spin_unlock_wait(&task->pi_lock);
		smp_mb();

		do {
			next = work->next;
			work->func(work);
			work = next;
			cond_resched();
		} while (work);
	}
}

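/*
 * Context sketch (simplified; details vary by kernel version): the
 * core kernel invokes task_work_run() on the way back to user mode
 * via the TIF_NOTIFY_RESUME path, and again from the exit path, where
 * the final flush installs &work_exited so any later task_work_add()
 * fails with -ESRCH. Roughly, the resume hook amounts to:
 */
#if 0	/* example only; notify_resume_sketch is a hypothetical name */
static inline void notify_resume_sketch(struct pt_regs *regs)
{
	if (unlikely(current->task_works))
		task_work_run();
}
#endif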