/*
 * include/linux/idr.h
 *
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *      Copyright (C) 2002 by Concurrent Computer Corporation
 *      Distributed under the GNU GPL license version 2.
 *
 * Small id to pointer translation service avoiding fixed sized
 * tables.
 */

#ifndef __IDR_H__
#define __IDR_H__

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/rcupdate.h>

#if BITS_PER_LONG == 32
# define IDR_BITS 5
# define IDR_FULL 0xfffffffful
/* Only two bits of the top level bitmap can be used: a full tree would
   cover 5 bits * 7 levels = 35 bits, but an id only uses 31 bits, so a
   single id bit (two slots) is left for the top level. */
# define TOP_LEVEL_FULL (IDR_FULL >> 30)
#elif BITS_PER_LONG == 64
# define IDR_BITS 6
# define IDR_FULL 0xfffffffffffffffful
/* Only two bits of the top level bitmap can be used: a full tree would
   cover 6 bits * 6 levels = 36 bits, but an id only uses 31 bits, so a
   single id bit (two slots) is left for the top level. */
# define TOP_LEVEL_FULL (IDR_FULL >> 62)
#else
# error "BITS_PER_LONG is not 32 or 64"
#endif

#define IDR_SIZE (1 << IDR_BITS)
#define IDR_MASK ((1 << IDR_BITS)-1)
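
/*
 * Illustrative note (not part of this header's API): ids are consumed
 * IDR_BITS at a time from the leaf upwards, so the array index used at
 * a layer sitting "layer" steps above the leaves is
 *
 *      (id >> (layer * IDR_BITS)) & IDR_MASK
 *
 * e.g. with IDR_BITS == 5 (32-bit longs), id 1000 gives index 8 at the
 * leaf, 31 one level up and 0 above that.
 */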

#define MAX_ID_SHIFT (sizeof(int)*8 - 1)
#define MAX_ID_BIT (1U << MAX_ID_SHIFT)
#define MAX_ID_MASK (MAX_ID_BIT - 1)

/* Leave the possibility of an incomplete final layer */
#define MAX_LEVEL ((MAX_ID_SHIFT + IDR_BITS - 1) / IDR_BITS)

/* Number of id_layer structs to leave in free list */
#define IDR_FREE_MAX (MAX_LEVEL + MAX_LEVEL)

struct idr_layer {
        unsigned long            bitmap; /* A zero bit means "space here" */
        struct idr_layer __rcu  *ary[1<<IDR_BITS];
        int                      count;  /* When zero, we can release it */
        int                      layer;  /* distance from leaf */
        struct rcu_head          rcu_head;
};

struct idr {
        struct idr_layer __rcu *top;
        struct idr_layer *id_free;
        int               layers; /* only valid without concurrent changes */
        int               id_free_cnt;
        spinlock_t        lock;
};

#define IDR_INIT(name)                                          \
{                                                               \
        .top            = NULL,                                 \
        .id_free        = NULL,                                 \
        .layers         = 0,                                    \
        .id_free_cnt    = 0,                                    \
        .lock           = __SPIN_LOCK_UNLOCKED(name.lock),      \
}
#define DEFINE_IDR(name)        struct idr name = IDR_INIT(name)

/* Actions to be taken after a call to _idr_sub_alloc */
#define IDR_NEED_TO_GROW -2
#define IDR_NOMORE_SPACE -3

#define _idr_rc_to_errno(rc) ((rc) == -1 ? -EAGAIN : -ENOSPC)

/**
 * DOC: idr sync
 * idr synchronization (stolen from radix-tree.h)
 *
 * idr_find() can be called locklessly, using RCU. The caller must ensure
 * calls to this function are made within rcu_read_lock() regions. Other
 * readers (lock-free or otherwise) and modifications may be running
 * concurrently.
 *
 * It is still required that the caller manage the synchronization and
 * lifetimes of the items. So if RCU lock-free lookups are used, typically
 * this would mean that the items have their own locks, or are amenable to
 * lock-free access; and that the items are freed by RCU (or only freed after
 * having been deleted from the idr tree *and* a synchronize_rcu() grace
 * period).
 */
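
/*
 * A minimal lookup sketch under the rules above ("my_idr" and "obj" are
 * placeholder names, not part of this header).  The rcu_read_lock()
 * section only keeps the idr_layer nodes walked by idr_find() from being
 * freed under us; pinning or copying the object itself before dropping
 * the read lock is still the caller's responsibility.
 *
 *      rcu_read_lock();
 *      obj = idr_find(&my_idr, id);
 *      if (obj)
 *              pin_or_copy_object(obj);        /* hypothetical helper */
 *      rcu_read_unlock();
 */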

/*
 * This is what we export.
 */

void *idr_find(struct idr *idp, int id);
int idr_pre_get(struct idr *idp, gfp_t gfp_mask);
int idr_get_new(struct idr *idp, void *ptr, int *id);
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id);
int idr_for_each(struct idr *idp,
                 int (*fn)(int id, void *p, void *data), void *data);
void *idr_get_next(struct idr *idp, int *nextid);
void *idr_replace(struct idr *idp, void *ptr, int id);
void idr_remove(struct idr *idp, int id);
void idr_remove_all(struct idr *idp);
void idr_destroy(struct idr *idp);
void idr_init(struct idr *idp);
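
/*
 * Allocation sketch ("my_idr", "my_lock" and "ptr" are placeholder names;
 * error handling trimmed): idr_pre_get() preloads node memory and may
 * sleep, so it is called outside the caller's lock, and -EAGAIN from
 * idr_get_new() means "preload again and retry".
 *
 *      DEFINE_IDR(my_idr);
 *      ...
 *      int id, ret;
 *
 *      do {
 *              if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *                      return -ENOMEM;
 *              spin_lock(&my_lock);
 *              ret = idr_get_new(&my_idr, ptr, &id);
 *              spin_unlock(&my_lock);
 *      } while (ret == -EAGAIN);
 */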


/*
 * IDA - IDR based id allocator, use when translation from id to
 * pointer isn't necessary.
 *
 * IDA_BITMAP_LONGS is calculated to be one less to accommodate
 * ida_bitmap->nr_busy so that the whole struct fits in 128 bytes.
 */
#define IDA_CHUNK_SIZE          128     /* 128 bytes per chunk */
#define IDA_BITMAP_LONGS        (IDA_CHUNK_SIZE / sizeof(long) - 1)
#define IDA_BITMAP_BITS         (IDA_BITMAP_LONGS * sizeof(long) * 8)
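
/*
 * Worked example of the sizing above (illustrative only): on a 64-bit
 * kernel sizeof(long) == 8, so IDA_BITMAP_LONGS is 128/8 - 1 = 15 and
 * IDA_BITMAP_BITS is 15 * 64 = 960, leaving room for nr_busy so that
 * struct ida_bitmap is exactly IDA_CHUNK_SIZE (128) bytes.  On 32-bit
 * the figures are 31 longs and 992 bits.
 */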

struct ida_bitmap {
        long                    nr_busy;
        unsigned long           bitmap[IDA_BITMAP_LONGS];
};

struct ida {
        struct idr              idr;
        struct ida_bitmap       *free_bitmap;
};

#define IDA_INIT(name)          { .idr = IDR_INIT(name), .free_bitmap = NULL, }
#define DEFINE_IDA(name)        struct ida name = IDA_INIT(name)

int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id);
int ida_get_new(struct ida *ida, int *p_id);
void ida_remove(struct ida *ida, int id);
void ida_destroy(struct ida *ida);
void ida_init(struct ida *ida);

int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
                   gfp_t gfp_mask);
void ida_simple_remove(struct ida *ida, unsigned int id);
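
/*
 * Convenience-wrapper sketch ("my_ida" is a placeholder name): the
 * ida_simple_*() helpers do the ida_pre_get()/retry dance and the
 * locking internally, so typical usage is just
 *
 *      DEFINE_IDA(my_ida);
 *      ...
 *      id = ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);
 *      if (id < 0)
 *              return id;
 *      ...
 *      ida_simple_remove(&my_ida, id);
 *
 * where an end of 0 asks for no upper bound on the returned id.
 */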

void __init idr_init_cache(void);

#endif /* __IDR_H__ */