/* linux/include/linux/idr.h */
   1/*
   2 * include/linux/idr.h
   3 * 
   4 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
   5 *      Copyright (C) 2002 by Concurrent Computer Corporation
   6 *      Distributed under the GNU GPL license version 2.
   7 *
   8 * Small id to pointer translation service avoiding fixed sized
   9 * tables.
  10 */
  11
  12#ifndef __IDR_H__
  13#define __IDR_H__
  14
  15#include <linux/types.h>
  16#include <linux/bitops.h>
  17#include <linux/init.h>
  18#include <linux/rcupdate.h>
  19
/*
 * We want shallower trees and thus more bits covered at each layer.  8
 * bits gives us large enough first layer for most use cases and maximum
 * tree depth of 4.  Each idr_layer is slightly larger than 2k on 64bit and
 * 1k on 32bit.
 */
#define IDR_BITS 8
/* Fan-out of each idr_layer: 256 child slots per node. */
#define IDR_SIZE (1 << IDR_BITS)
/* Extracts the within-layer slot index from an id (low IDR_BITS bits). */
#define IDR_MASK ((1 << IDR_BITS)-1)
/*
 * One node of the idr radix tree.  Interior nodes point at child
 * idr_layers via @ary; leaf nodes (layer == 0) store the user pointers
 * in @ary instead.
 */
struct idr_layer {
	int			prefix;	/* the ID prefix of this idr_layer */
	int			layer;	/* distance from leaf (0 == leaf) */
	struct idr_layer __rcu	*ary[1<<IDR_BITS];
	int			count;	/* When zero, we can release it */
	union {
		/* A zero bit means "space here" */
		DECLARE_BITMAP(bitmap, IDR_SIZE);
		/* Reused for deferred freeing once the node is empty;
		 * safe to overlay @bitmap because a dying node no longer
		 * tracks free slots. */
		struct rcu_head		rcu_head;
	};
};
  41
/*
 * The idr handle proper.  Embed one of these (or use DEFINE_IDR) to get
 * a small-integer-id -> pointer map.
 */
struct idr {
	struct idr_layer __rcu	*hint;	/* the last layer allocated from */
	struct idr_layer __rcu	*top;	/* root of the radix tree */
	int			layers;	/* only valid w/o concurrent changes */
	int			cur;	/* current pos for cyclic allocation */
	spinlock_t		lock;	/* protects modifications to the tree */
	int			id_free_cnt;	/* length of the @id_free list */
	struct idr_layer	*id_free;	/* preallocated-layer freelist */
};
  51
/*
 * Static initializer for a struct idr; all pointers/counters start at
 * zero, only the spinlock needs explicit initialization.
 */
#define IDR_INIT(name)							\
{									\
	.lock			= __SPIN_LOCK_UNLOCKED(name.lock),	\
}
/* Define and statically initialize an idr in one step. */
#define DEFINE_IDR(name)	struct idr name = IDR_INIT(name)
  57
  58/**
  59 * DOC: idr sync
  60 * idr synchronization (stolen from radix-tree.h)
  61 *
  62 * idr_find() is able to be called locklessly, using RCU. The caller must
  63 * ensure calls to this function are made within rcu_read_lock() regions.
  64 * Other readers (lock-free or otherwise) and modifications may be running
  65 * concurrently.
  66 *
  67 * It is still required that the caller manage the synchronization and
  68 * lifetimes of the items. So if RCU lock-free lookups are used, typically
  69 * this would mean that the items have their own locks, or are amenable to
  70 * lock-free access; and that the items are freed by RCU (or only freed after
  71 * having been deleted from the idr tree *and* a synchronize_rcu() grace
  72 * period).
  73 */
  74
  75/*
  76 * This is what we export.
  77 */
  78
  79void *idr_find_slowpath(struct idr *idp, int id);
  80void idr_preload(gfp_t gfp_mask);
  81int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask);
  82int idr_alloc_cyclic(struct idr *idr, void *ptr, int start, int end, gfp_t gfp_mask);
  83int idr_for_each(struct idr *idp,
  84                 int (*fn)(int id, void *p, void *data), void *data);
  85void *idr_get_next(struct idr *idp, int *nextid);
  86void *idr_replace(struct idr *idp, void *ptr, int id);
  87void idr_remove(struct idr *idp, int id);
  88void idr_destroy(struct idr *idp);
  89void idr_init(struct idr *idp);
  90bool idr_is_empty(struct idr *idp);
  91
/**
 * idr_preload_end - end preload section started with idr_preload()
 *
 * Each idr_preload() should be matched with an invocation of this
 * function.  See idr_preload() for details.
 */
static inline void idr_preload_end(void)
{
	/* idr_preload() disabled preemption to pin the per-cpu layer
	 * cache; re-enable it here. */
	preempt_enable();
}
 102
 103/**
 104 * idr_find - return pointer for given id
 105 * @idr: idr handle
 106 * @id: lookup key
 107 *
 108 * Return the pointer given the id it has been registered with.  A %NULL
 109 * return indicates that @id is not valid or you passed %NULL in
 110 * idr_get_new().
 111 *
 112 * This function can be called under rcu_read_lock(), given that the leaf
 113 * pointers lifetimes are correctly managed.
 114 */
 115static inline void *idr_find(struct idr *idr, int id)
 116{
 117        struct idr_layer *hint = rcu_dereference_raw(idr->hint);
 118
 119        if (hint && (id & ~IDR_MASK) == hint->prefix)
 120                return rcu_dereference_raw(hint->ary[id & IDR_MASK]);
 121
 122        return idr_find_slowpath(idr, id);
 123}
 124
/**
 * idr_for_each_entry - iterate over an idr's elements of a given type
 * @idp:     idr handle
 * @entry:   the type * to use as cursor
 * @id:      id entry's key
 *
 * @entry and @id do not need to be initialized before the loop, and
 * after normal termination @entry is left with the value NULL.  This
 * is convenient for a "not found" value.
 */
#define idr_for_each_entry(idp, entry, id)			\
	for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id)
/**
 * idr_for_each_entry_continue - continue iteration over an idr's elements of a given type
 * @idp:     idr handle
 * @entry:   the type * to use as cursor
 * @id:      id entry's key
 *
 * Continue to iterate over list of given type, continuing after
 * the current position.
 */
#define idr_for_each_entry_continue(idp, entry, id)			\
	for ((entry) = idr_get_next((idp), &(id));			\
	     entry;							\
	     ++id, (entry) = idr_get_next((idp), &(id)))
 151
/*
 * IDA - IDR based id allocator, use when translation from id to
 * pointer isn't necessary.
 *
 * IDA_BITMAP_LONGS is calculated to be one less to accommodate
 * ida_bitmap->nr_busy so that the whole struct fits in 128 bytes.
 */
#define IDA_CHUNK_SIZE		128	/* 128 bytes per chunk */
#define IDA_BITMAP_LONGS	(IDA_CHUNK_SIZE / sizeof(long) - 1)
#define IDA_BITMAP_BITS 	(IDA_BITMAP_LONGS * sizeof(long) * 8)

/* One chunk of id-tracking bits; the idr maps chunk index -> ida_bitmap. */
struct ida_bitmap {
	long			nr_busy;	/* number of set (allocated) bits */
	unsigned long		bitmap[IDA_BITMAP_LONGS];
};
 167
/* The IDA handle: an idr of ida_bitmap chunks plus one spare bitmap. */
struct ida {
	struct idr		idr;		/* maps chunk index -> ida_bitmap */
	struct ida_bitmap	*free_bitmap;	/* preallocated spare chunk */
};

/* Static initializer for a struct ida. */
#define IDA_INIT(name)		{ .idr = IDR_INIT((name).idr), .free_bitmap = NULL, }
/* Define and statically initialize an ida in one step. */
#define DEFINE_IDA(name)	struct ida name = IDA_INIT(name)
 175
/* Preallocate memory so a following ida_get_new*() cannot fail on OOM. */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
/* Allocate the smallest unused id >= @starting_id into *p_id. */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id);
/* Release @id back to the allocator. */
void ida_remove(struct ida *ida, int id);
/* Free all memory held by the ida; it must be empty of allocated ids. */
void ida_destroy(struct ida *ida);
/* Initialize a dynamically allocated ida (alternative to DEFINE_IDA). */
void ida_init(struct ida *ida);

/* Self-contained allocate/free pair that handle locking and retries
 * internally; ids are constrained to [start, end). */
int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
		   gfp_t gfp_mask);
void ida_simple_remove(struct ida *ida, unsigned int id);
 185
/**
 * ida_get_new - allocate new ID
 * @ida:	idr handle
 * @p_id:	pointer to the allocated handle
 *
 * Simple wrapper around ida_get_new_above() w/ @starting_id of zero.
 */
static inline int ida_get_new(struct ida *ida, int *p_id)
{
	return ida_get_new_above(ida, 0, p_id);
}
 197
/* Boot-time setup of the idr_layer slab cache; called once from init. */
void __init idr_init_cache(void);
 199
 200#endif /* __IDR_H__ */
 201