linux/block/blk-ioc.c
/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>      /* for max_pfn/max_low_pfn */
#include <linux/slab.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/*
 * Tear down CFQ's per-process state: if a cfq_io_context is still hanging
 * off this io_context, invoke the destructor hook CFQ registered on it.
 */
static void cfq_dtor(struct io_context *ioc)
{
        if (!hlist_empty(&ioc->cic_list)) {
                struct cfq_io_context *cic;

                cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
                                  cic_list);
                cic->dtor(ioc);
        }
}

/*
 * IO Context helper functions. put_io_context() returns 1 if there are no
 * more users of this io context, 0 otherwise.
 */
int put_io_context(struct io_context *ioc)
{
        if (ioc == NULL)
                return 1;

        BUG_ON(atomic_long_read(&ioc->refcount) == 0);

        if (atomic_long_dec_and_test(&ioc->refcount)) {
                rcu_read_lock();
                cfq_dtor(ioc);
                rcu_read_unlock();

                kmem_cache_free(iocontext_cachep, ioc);
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL(put_io_context);
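
/*
 * Usage sketch (illustrative only, not a caller in this file): code that
 * takes its own reference on an io_context, for example via
 * get_io_context() below, pairs it with put_io_context() once it is done.
 * The gfp flags and node value here are hypothetical:
 *
 *     struct io_context *ioc = get_io_context(GFP_NOIO, -1);
 *     if (ioc) {
 *             ... inspect or update ioc (ioprio, batching state) ...
 *             put_io_context(ioc);
 *     }
 *
 * The return value of put_io_context() only reports whether this call
 * freed the context; most callers ignore it.
 */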

/*
 * Called when the last task attached to this io_context exits: give CFQ a
 * chance to drop its per-queue state via the exit hook it registered.
 * rcu_read_lock() keeps the cfq_io_context stable while we look at it.
 */
static void cfq_exit(struct io_context *ioc)
{
        rcu_read_lock();

        if (!hlist_empty(&ioc->cic_list)) {
                struct cfq_io_context *cic;

                cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
                                  cic_list);
                cic->exit(ioc);
        }
        rcu_read_unlock();
}

/*
 * Called by the exiting task.  Detach the io_context from the task, tell
 * CFQ once the last task sharing it has gone (nr_tasks hits zero), then
 * drop this task's reference on the context.
 */
void exit_io_context(struct task_struct *task)
{
        struct io_context *ioc;

        task_lock(task);
        ioc = task->io_context;
        task->io_context = NULL;
        task_unlock(task);

        if (atomic_dec_and_test(&ioc->nr_tasks))
                cfq_exit(ioc);

        put_io_context(ioc);
}

/*
 * Allocate and initialise a fresh io_context on the given NUMA node.  The
 * new context starts out with one reference and one attached task.
 */
struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
{
        struct io_context *ret;

        ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node);
        if (ret) {
                atomic_long_set(&ret->refcount, 1);
                atomic_set(&ret->nr_tasks, 1);
                spin_lock_init(&ret->lock);
                ret->ioprio_changed = 0;
                ret->ioprio = 0;
                ret->last_waited = 0; /* doesn't matter... */
                ret->nr_batch_requests = 0; /* because this is 0 */
                INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
                INIT_HLIST_HEAD(&ret->cic_list);
                ret->ioc_data = NULL;
        }

        return ret;
}
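
/*
 * Usage sketch (illustrative only, not a caller in this file): code that
 * needs a context for a task other than current, e.g. to hand a freshly
 * forked child its own copy of an I/O priority, could allocate one
 * directly.  The parent_ioprio value below is hypothetical:
 *
 *     struct io_context *ioc = alloc_io_context(GFP_KERNEL, -1);
 *     if (ioc)
 *             ioc->ioprio = parent_ioprio;
 *
 * Since the new context starts with refcount and nr_tasks both at 1,
 * whoever owns it is expected to drop that reference eventually, via
 * put_io_context() or exit_io_context().
 */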

/*
 * If the current task has no IO context then create one and initialise it.
 * Otherwise, return its existing IO context.
 *
 * The returned IO context doesn't have a specifically elevated refcount,
 * but since the current task itself holds a reference, the context can be
 * used in general code, so long as it is only used from the current task's
 * context.
 */
struct io_context *current_io_context(gfp_t gfp_flags, int node)
{
        struct task_struct *tsk = current;
        struct io_context *ret;

        ret = tsk->io_context;
        if (likely(ret))
                return ret;

        ret = alloc_io_context(gfp_flags, node);
        if (ret) {
                /* make sure set_task_ioprio() sees the settings above */
                smp_wmb();
                tsk->io_context = ret;
        }

        return ret;
}

/*
 * If the current task has no IO context then create one and initialise it.
 * If it does have a context, take a ref on it.
 *
 * This is always called in the context of the task which submitted the I/O.
 */
struct io_context *get_io_context(gfp_t gfp_flags, int node)
{
        struct io_context *ret = NULL;

        /*
         * Check for unlikely race with exiting task. ioc ref count is
         * zero when ioc is being detached.
         */
        do {
                ret = current_io_context(gfp_flags, node);
                if (unlikely(!ret))
                        break;
        } while (!atomic_long_inc_not_zero(&ret->refcount));

        return ret;
}
EXPORT_SYMBOL(get_io_context);
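
/*
 * Usage sketch (illustrative only, not a caller in this file): an I/O
 * scheduler or driver running in the submitting task's context can pin
 * that task's io_context across request setup.  "q" stands for a
 * hypothetical request_queue and the gfp flags are only an example:
 *
 *     struct io_context *ioc = get_io_context(GFP_ATOMIC, q->node);
 *     if (ioc) {
 *             ... associate the request being built with ioc ...
 *             put_io_context(ioc);
 *     }
 */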

/*
 * Create the slab cache for io_context objects early in boot, before any
 * block I/O can be issued.
 */
static int __init blk_ioc_init(void)
{
        iocontext_cachep = kmem_cache_create("blkdev_ioc",
                        sizeof(struct io_context), 0, SLAB_PANIC, NULL);
        return 0;
}
subsys_initcall(blk_ioc_init);