linux/kernel/async.c
/*
 * async.c: Asynchronous function calls for boot performance
 *
 * (C) Copyright 2009 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */


/*

Goals and Theory of Operation

The primary goal of this feature is to reduce kernel boot time by
running various independent hardware delay and discovery operations
decoupled from one another rather than strictly serialized.

More specifically, the asynchronous function call concept allows
certain operations (primarily during system boot) to happen
asynchronously, out of order, while the externally visible parts of
these operations still happen sequentially and in order
(not unlike how an out-of-order CPU retires its instructions in order).

Key to the asynchronous function call implementation is the concept of
a "sequence cookie" (which, although it has an abstracted type, can be
thought of as a monotonically incrementing number).

The async core assigns each scheduled event such a sequence cookie and
passes it to the called function.

Before doing a globally visible operation, such as registering device
numbers, the asynchronously called function should call
async_synchronize_cookie() and pass in its own cookie. The
async_synchronize_cookie() function makes sure that all asynchronous
operations that were scheduled prior to the operation corresponding
with the cookie have completed.

Subsystem/driver initialization code that schedules asynchronous probe
functions, but which shares global resources with other drivers/subsystems
that do not use the asynchronous call feature, needs to do a full
synchronization with the async_synchronize_full() function before returning
from its init function. This maintains strict ordering between the
asynchronous and synchronous parts of the kernel.

*/
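
/*
 * A minimal sketch of the pattern described above. The foo_* names are
 * hypothetical driver helpers shown for illustration only; only
 * async_schedule() and async_synchronize_cookie() are APIs defined in
 * this file.
 *
 *      static void foo_probe_async(void *data, async_cookie_t cookie)
 *      {
 *              struct foo_device *dev = data;
 *
 *              foo_reset_and_detect(dev);      (slow, independent work)
 *
 *              (wait for everything scheduled before us, then do the
 *               globally visible part in order)
 *              async_synchronize_cookie(cookie);
 *              foo_register_device(dev);
 *      }
 *
 *      ...
 *      async_schedule(foo_probe_async, dev);
 */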

#include <linux/async.h>
#include <linux/bug.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <asm/atomic.h>

static async_cookie_t next_cookie = 1;

#define MAX_THREADS     256
#define MAX_WORK        32768

static LIST_HEAD(async_pending);
static LIST_HEAD(async_running);
static DEFINE_SPINLOCK(async_lock);

static int async_enabled = 0;

struct async_entry {
        struct list_head list;
        async_cookie_t   cookie;
        async_func_ptr   *func;
        void             *data;
        struct list_head *running;
};

static DECLARE_WAIT_QUEUE_HEAD(async_done);
static DECLARE_WAIT_QUEUE_HEAD(async_new);

static atomic_t entry_count;
static atomic_t thread_count;

extern int initcall_debug;


/*
 * MUST be called with the lock held!
 */
static async_cookie_t  __lowest_in_progress(struct list_head *running)
{
        struct async_entry *entry;

        if (!list_empty(running)) {
                entry = list_first_entry(running,
                        struct async_entry, list);
                return entry->cookie;
        }

        list_for_each_entry(entry, &async_pending, list)
                if (entry->running == running)
                        return entry->cookie;

        return next_cookie;     /* "infinity" value */
}

static async_cookie_t  lowest_in_progress(struct list_head *running)
{
        unsigned long flags;
        async_cookie_t ret;

        spin_lock_irqsave(&async_lock, flags);
        ret = __lowest_in_progress(running);
        spin_unlock_irqrestore(&async_lock, flags);
        return ret;
}

/*
 * pick the first pending entry and run it
 */
static void run_one_entry(void)
{
        unsigned long flags;
        struct async_entry *entry;
        ktime_t calltime, delta, rettime;

        /* 1) pick one task from the pending queue */

        spin_lock_irqsave(&async_lock, flags);
        if (list_empty(&async_pending))
                goto out;
        entry = list_first_entry(&async_pending, struct async_entry, list);

        /* 2) move it to the running queue */
        list_move_tail(&entry->list, entry->running);
        spin_unlock_irqrestore(&async_lock, flags);

        /* 3) run it (and print duration) */
        if (initcall_debug && system_state == SYSTEM_BOOTING) {
                printk("calling  %lli_%pF @ %i\n", (long long)entry->cookie,
                        entry->func, task_pid_nr(current));
                calltime = ktime_get();
        }
        entry->func(entry->data, entry->cookie);
        if (initcall_debug && system_state == SYSTEM_BOOTING) {
                rettime = ktime_get();
                delta = ktime_sub(rettime, calltime);
                printk("initcall %lli_%pF returned 0 after %lld usecs\n",
                        (long long)entry->cookie,
                        entry->func,
                        (long long)ktime_to_ns(delta) >> 10);
        }

        /* 4) remove it from the running queue */
        spin_lock_irqsave(&async_lock, flags);
        list_del(&entry->list);

        /* 5) free the entry */
        kfree(entry);
        atomic_dec(&entry_count);

        spin_unlock_irqrestore(&async_lock, flags);

        /* 6) wake up any waiters. */
        wake_up(&async_done);
        return;

out:
        spin_unlock_irqrestore(&async_lock, flags);
}


static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct list_head *running)
{
        struct async_entry *entry;
        unsigned long flags;
        async_cookie_t newcookie;

        /* allow irq-off callers */
        entry = kzalloc(sizeof(struct async_entry), GFP_ATOMIC);

        /*
         * If we're out of memory or if there's too much work
         * pending already, we execute synchronously.
         */
        if (!async_enabled || !entry || atomic_read(&entry_count) > MAX_WORK) {
                kfree(entry);
                spin_lock_irqsave(&async_lock, flags);
                newcookie = next_cookie++;
                spin_unlock_irqrestore(&async_lock, flags);

                /* low on memory.. run synchronously */
                ptr(data, newcookie);
                return newcookie;
        }
        entry->func = ptr;
        entry->data = data;
        entry->running = running;

        spin_lock_irqsave(&async_lock, flags);
        newcookie = entry->cookie = next_cookie++;
        list_add_tail(&entry->list, &async_pending);
        atomic_inc(&entry_count);
        spin_unlock_irqrestore(&async_lock, flags);
        wake_up(&async_new);
        return newcookie;
}

/**
 * async_schedule - schedule a function for asynchronous execution
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule(async_func_ptr *ptr, void *data)
{
        return __async_schedule(ptr, data, &async_running);
}
EXPORT_SYMBOL_GPL(async_schedule);

/**
 * async_schedule_domain - schedule a function for asynchronous execution within a certain domain
 * @ptr: function to execute asynchronously
 * @data: data pointer to pass to the function
 * @running: running list for the domain
 *
 * Returns an async_cookie_t that may be used for checkpointing later.
 * A synchronization domain is specified via the running list @running;
 * it may be passed to the async_synchronize_*_domain() functions to
 * wait within that domain rather than globally.
 * Note: This function may be called from atomic or non-atomic contexts.
 */
async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
                                     struct list_head *running)
{
        return __async_schedule(ptr, data, running);
}
EXPORT_SYMBOL_GPL(async_schedule_domain);
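
/*
 * A minimal sketch of domain usage. The "foo" names below are
 * hypothetical and shown for illustration only; only
 * async_schedule_domain() and async_synchronize_full_domain() are APIs
 * defined in this file.
 *
 *      static LIST_HEAD(foo_domain);
 *
 *      async_schedule_domain(foo_probe_async, dev, &foo_domain);
 *      ...
 *      async_synchronize_full_domain(&foo_domain);
 *      (waits only for foo's own async work, not the global queue)
 */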

/**
 * async_synchronize_full - synchronize all asynchronous function calls
 *
 * This function waits until all asynchronous function calls have been done.
 */
void async_synchronize_full(void)
{
        do {
                async_synchronize_cookie(next_cookie);
        } while (!list_empty(&async_running) || !list_empty(&async_pending));
}
EXPORT_SYMBOL_GPL(async_synchronize_full);
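
/*
 * A minimal sketch of the init-path pattern from the theory of
 * operation above, assuming a hypothetical "foo" driver whose init
 * code schedules async probing but shares resources with non-async
 * drivers; foo_init(), foo_probe_async() and foo_dev are illustrative
 * names only.
 *
 *      static int __init foo_init(void)
 *      {
 *              async_schedule(foo_probe_async, &foo_dev);
 *
 *              (wait for all async work before returning, to keep
 *               ordering with drivers that do not use async calls)
 *              async_synchronize_full();
 *              return 0;
 *      }
 */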

/**
 * async_synchronize_full_domain - synchronize all asynchronous function calls within a certain domain
 * @list: running list to synchronize on
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by the running list @list have been done.
 */
void async_synchronize_full_domain(struct list_head *list)
{
        async_synchronize_cookie_domain(next_cookie, list);
}
EXPORT_SYMBOL_GPL(async_synchronize_full_domain);

/**
 * async_synchronize_cookie_domain - synchronize asynchronous function calls within a certain domain with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 * @running: running list to synchronize on
 *
 * This function waits until all asynchronous function calls for the
 * synchronization domain specified by the running list @running submitted
 * prior to @cookie have been done.
 */
void async_synchronize_cookie_domain(async_cookie_t cookie,
                                     struct list_head *running)
{
        ktime_t starttime, delta, endtime;

        if (initcall_debug && system_state == SYSTEM_BOOTING) {
                printk("async_waiting @ %i\n", task_pid_nr(current));
                starttime = ktime_get();
        }

        wait_event(async_done, lowest_in_progress(running) >= cookie);

        if (initcall_debug && system_state == SYSTEM_BOOTING) {
                endtime = ktime_get();
                delta = ktime_sub(endtime, starttime);

                printk("async_continuing @ %i after %lli usec\n",
                        task_pid_nr(current),
                        (long long)ktime_to_ns(delta) >> 10);
        }
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie_domain);

/**
 * async_synchronize_cookie - synchronize asynchronous function calls with cookie checkpointing
 * @cookie: async_cookie_t to use as checkpoint
 *
 * This function waits until all asynchronous function calls prior to @cookie
 * have been done.
 */
void async_synchronize_cookie(async_cookie_t cookie)
{
        async_synchronize_cookie_domain(cookie, &async_running);
}
EXPORT_SYMBOL_GPL(async_synchronize_cookie);


static int async_thread(void *unused)
{
        DECLARE_WAITQUEUE(wq, current);
        add_wait_queue(&async_new, &wq);

        while (!kthread_should_stop()) {
                int ret = HZ;
                set_current_state(TASK_INTERRUPTIBLE);
                /*
                 * check the list head without lock.. false positives
                 * are dealt with inside run_one_entry() while holding
                 * the lock.
                 */
                rmb();
                if (!list_empty(&async_pending))
                        run_one_entry();
                else
                        ret = schedule_timeout(HZ);

                if (ret == 0) {
                        /*
                         * we timed out, which means this thread is
                         * redundant. we sign off and die, but to avoid
                         * any races there is a last-straw check to see
                         * if work snuck in.
                         */
                        atomic_dec(&thread_count);
                        wmb(); /* manager must see our departure first */
                        if (list_empty(&async_pending))
                                break;
                        /*
                         * whoops, work came in between us timing out
                         * and signing off; we need to stay alive and
                         * keep working.
                         */
                        atomic_inc(&thread_count);
                }
        }
        remove_wait_queue(&async_new, &wq);

        return 0;
}

static int async_manager_thread(void *unused)
{
        DECLARE_WAITQUEUE(wq, current);
        add_wait_queue(&async_new, &wq);

        while (!kthread_should_stop()) {
                int tc, ec;

                set_current_state(TASK_INTERRUPTIBLE);

                tc = atomic_read(&thread_count);
                rmb();
                ec = atomic_read(&entry_count);

                while (tc < ec && tc < MAX_THREADS) {
                        if (IS_ERR(kthread_run(async_thread, NULL, "async/%i",
                                               tc))) {
                                msleep(100);
                                continue;
                        }
                        atomic_inc(&thread_count);
                        tc++;
                }

                schedule();
        }
        remove_wait_queue(&async_new, &wq);

        return 0;
}

static int __init async_init(void)
{
        async_enabled =
                !IS_ERR(kthread_run(async_manager_thread, NULL, "async/mgr"));

        WARN_ON(!async_enabled);
        return 0;
}

core_initcall(async_init);