/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2015 Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/include/libcfs/libcfs_hash.h
 *
 * Hashing routines
 */

#ifndef __LIBCFS_HASH_H__
#define __LIBCFS_HASH_H__

#include <linux/hash.h>

/*
 * Knuth recommends primes in approximately golden ratio to the maximum
 * integer representable by a machine word for multiplicative hashing.
 * Chuck Lever verified the effectiveness of this technique:
 * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
 *
 * These primes are chosen to be bit-sparse, that is, operations on
 * them can use shifts and additions instead of multiplications for
 * machines where multiplications are slow.
 */
/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
#define CFS_GOLDEN_RATIO_PRIME_32 0x9e370001UL
/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
#define CFS_GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001ULL

/** disable debug */
#define CFS_HASH_DEBUG_NONE     0
/** record hash depth and output to console when it's too deep;
 *  computing overhead is low but it consumes more memory
 */
#define CFS_HASH_DEBUG_1        1
/** expensive, check key validation */
#define CFS_HASH_DEBUG_2        2

#define CFS_HASH_DEBUG_LEVEL    CFS_HASH_DEBUG_NONE

struct cfs_hash_ops;
struct cfs_hash_lock_ops;
struct cfs_hash_hlist_ops;

union cfs_hash_lock {
        rwlock_t                rw;             /**< rwlock */
        spinlock_t              spin;           /**< spinlock */
};

/**
 * cfs_hash_bucket is a container of:
 * - lock, counter ...
 * - array of hash-heads starting at hsb_head[0]; a hash-head can be one of
 *   . struct cfs_hash_head
 *   . struct cfs_hash_head_dep
 *   . struct cfs_hash_dhead
 *   . struct cfs_hash_dhead_dep
 *   depending on the user's requirements
 * - some extra bytes (the caller can request them when creating the hash)
 */
struct cfs_hash_bucket {
        union cfs_hash_lock     hsb_lock;       /**< bucket lock */
        __u32                   hsb_count;      /**< current entries */
        __u32                   hsb_version;    /**< change version */
        unsigned int            hsb_index;      /**< index of bucket */
        int                     hsb_depmax;     /**< max depth on bucket */
        long                    hsb_head[0];    /**< hash-head array */
};

/**
 * cfs_hash bucket descriptor; it normally lives on the caller's stack
 */
struct cfs_hash_bd {
        /* address of bucket */
        struct cfs_hash_bucket  *bd_bucket;
        /* offset in bucket */
        unsigned int             bd_offset;
};

#define CFS_HASH_NAME_LEN       16      /**< default name length */
#define CFS_HASH_BIGNAME_LEN    64      /**< bigname for param tree */

#define CFS_HASH_BKT_BITS       3       /**< default bits of bucket */
#define CFS_HASH_BITS_MAX       30      /**< max bits of bucket */
#define CFS_HASH_BITS_MIN       CFS_HASH_BKT_BITS

/**
 * common hash attributes.
 */
enum cfs_hash_tag {
        /**
         * don't need any lock, caller will protect operations with its
         * own lock. With this flag:
         *  . CFS_HASH_NO_BKTLOCK, CFS_HASH_RW_BKTLOCK, CFS_HASH_SPIN_BKTLOCK
         *    will be ignored.
         *  . Some functions will be disabled with this flag, e.g.:
         *    cfs_hash_for_each_empty, cfs_hash_rehash
         */
        CFS_HASH_NO_LOCK        = 1 << 0,
        /** no bucket lock, use one spinlock to protect the whole hash */
        CFS_HASH_NO_BKTLOCK     = 1 << 1,
        /** rwlock to protect bucket */
        CFS_HASH_RW_BKTLOCK     = 1 << 2,
        /** spinlock to protect bucket */
        CFS_HASH_SPIN_BKTLOCK   = 1 << 3,
        /** always add new item to tail */
        CFS_HASH_ADD_TAIL       = 1 << 4,
        /** hash-table doesn't have refcount on item */
        CFS_HASH_NO_ITEMREF     = 1 << 5,
        /** big name for param-tree */
        CFS_HASH_BIGNAME        = 1 << 6,
        /** track global count */
        CFS_HASH_COUNTER        = 1 << 7,
        /** rehash item by new key */
        CFS_HASH_REHASH_KEY     = 1 << 8,
        /** enable dynamic hash resizing */
        CFS_HASH_REHASH         = 1 << 9,
        /** can shrink hash-size */
        CFS_HASH_SHRINK         = 1 << 10,
        /** assert hash is empty on exit */
        CFS_HASH_ASSERT_EMPTY   = 1 << 11,
        /** record hlist depth */
        CFS_HASH_DEPTH          = 1 << 12,
        /**
         * rehash is always scheduled in a different thread, so current
         * changes on the hash table are non-blocking
         */
        CFS_HASH_NBLK_CHANGE    = 1 << 13,
        /** NB: hs_flags is declared as __u16; change its type if you
         * need to support more than 16 flags
         */
};

/** most used attributes */
#define CFS_HASH_DEFAULT        (CFS_HASH_RW_BKTLOCK | \
                                 CFS_HASH_COUNTER | CFS_HASH_REHASH)

/**
 * cfs_hash is a general-purpose hash-table implementation. It supports:
 *    . two refcount modes
 *      hash-table with & without refcount
 *    . four lock modes
 *      nolock, one-spinlock, rw-bucket-lock, spin-bucket-lock
 *    . general operations
 *      lookup, add (add_tail or add_head), delete
 *    . rehash
 *      grow or shrink
 *    . iteration
 *      locked iteration and unlocked iteration
 *    . bigname
 *      support for long-name hash
 *    . debug
 *      trace max searching depth
 *
 * Rehash:
 * When the htable grows or shrinks, a separate task (cfs_hash_rehash_worker)
 * is spawned to handle the rehash in the background. Other processes can
 * concurrently perform additions, deletions, and lookups without being
 * blocked on rehash completion, because rehash releases the global wrlock
 * for each bucket.
 *
 * Rehash and iteration can't run at the same time because it's too tricky
 * to keep both of them safe and correct. As they are relatively rare
 * operations:
 *   . if iteration is in progress while we try to launch rehash, rehash
 *     just gives up and the iterator will launch it at the end.
 *   . if rehash is in progress while we try to iterate the hash table,
 *     we just wait (it shouldn't take very long); anyway, nobody should
 *     expect iteration of the whole hash-table to be non-blocking.
 *
 * During rehashing, a (key,object) pair may be in one of two buckets,
 * depending on whether the worker task has yet to transfer the object
 * to its new location in the table. Lookups and deletions need to search
 * both locations; additions must take care to only insert into the new
 * bucket.
 */
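
/*
 * A minimal usage sketch; the object type, key and ops table here are
 * hypothetical (see struct cfs_hash_ops below for the real callback
 * contract):
 *
 *      struct my_obj {
 *              __u64                   mo_key;
 *              atomic_t                mo_ref;
 *              struct hlist_node       mo_hnode;
 *      };
 *
 *      hs = cfs_hash_create("my_hash", 10, 14, CFS_HASH_BKT_BITS, 0,
 *                           CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
 *                           &my_hash_ops, CFS_HASH_DEFAULT);
 *      cfs_hash_add(hs, &obj->mo_key, &obj->mo_hnode);
 *      obj = cfs_hash_lookup(hs, &key);        (takes a ref via hs_get)
 *      cfs_hash_del(hs, &key, &obj->mo_hnode);
 *      cfs_hash_putref(hs);
 */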

struct cfs_hash {
        /** serialize with rehash, or serialize all operations if
         * the hash-table has CFS_HASH_NO_BKTLOCK
         */
        union cfs_hash_lock             hs_lock;
        /** hash operations */
        struct cfs_hash_ops             *hs_ops;
        /** hash lock operations */
        struct cfs_hash_lock_ops        *hs_lops;
        /** hash list operations */
        struct cfs_hash_hlist_ops       *hs_hops;
        /** hash buckets-table */
        struct cfs_hash_bucket          **hs_buckets;
        /** total number of items on this hash-table */
        atomic_t                        hs_count;
        /** hash flags, see cfs_hash_tag for detail */
        __u16                           hs_flags;
        /** # of extra bytes per bucket, for user-stored extended attributes */
        __u16                           hs_extra_bytes;
        /** wants to iterate */
        __u8                            hs_iterating;
        /** hash-table is dying */
        __u8                            hs_exiting;
        /** current hash bits */
        __u8                            hs_cur_bits;
        /** min hash bits */
        __u8                            hs_min_bits;
        /** max hash bits */
        __u8                            hs_max_bits;
        /** bits for rehash */
        __u8                            hs_rehash_bits;
        /** bits for each bucket */
        __u8                            hs_bkt_bits;
        /** resize min threshold */
        __u16                           hs_min_theta;
        /** resize max threshold */
        __u16                           hs_max_theta;
        /** resize count */
        __u32                           hs_rehash_count;
        /** # of iterators (callers of cfs_hash_for_each_*) */
        __u32                           hs_iterators;
        /** rehash workitem */
        cfs_workitem_t                  hs_rehash_wi;
        /** refcount on this hash table */
        atomic_t                        hs_refcount;
        /** rehash buckets-table */
        struct cfs_hash_bucket          **hs_rehash_buckets;
#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
        /** serialize debug members */
        spinlock_t                      hs_dep_lock;
        /** max depth */
        unsigned int                    hs_dep_max;
        /** id of the deepest bucket */
        unsigned int                    hs_dep_bkt;
        /** offset in the deepest bucket */
        unsigned int                    hs_dep_off;
        /** bits when we found the max depth */
        unsigned int                    hs_dep_bits;
        /** workitem to output max depth */
        cfs_workitem_t                  hs_dep_wi;
#endif
        /** name of htable */
        char                            hs_name[0];
};

struct cfs_hash_lock_ops {
        /** lock the hash table */
        void    (*hs_lock)(union cfs_hash_lock *lock, int exclusive);
        /** unlock the hash table */
        void    (*hs_unlock)(union cfs_hash_lock *lock, int exclusive);
        /** lock the hash bucket */
        void    (*hs_bkt_lock)(union cfs_hash_lock *lock, int exclusive);
        /** unlock the hash bucket */
        void    (*hs_bkt_unlock)(union cfs_hash_lock *lock, int exclusive);
};

struct cfs_hash_hlist_ops {
        /** return hlist_head of hash-head of @bd */
        struct hlist_head *(*hop_hhead)(struct cfs_hash *hs,
                                        struct cfs_hash_bd *bd);
        /** return hash-head size */
        int (*hop_hhead_size)(struct cfs_hash *hs);
        /** add @hnode to hash-head of @bd */
        int (*hop_hnode_add)(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                             struct hlist_node *hnode);
        /** remove @hnode from hash-head of @bd */
        int (*hop_hnode_del)(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                             struct hlist_node *hnode);
};

struct cfs_hash_ops {
        /** return hashed value from @key */
        unsigned (*hs_hash)(struct cfs_hash *hs, const void *key,
                            unsigned mask);
        /** return key address of @hnode */
        void *   (*hs_key)(struct hlist_node *hnode);
        /** copy key from @hnode to @key */
        void     (*hs_keycpy)(struct hlist_node *hnode, void *key);
        /**
         *  compare @key with key of @hnode
         *  returns 1 on a match
         */
        int      (*hs_keycmp)(const void *key, struct hlist_node *hnode);
        /** return object address of @hnode, i.e. container_of(...hnode) */
        void *   (*hs_object)(struct hlist_node *hnode);
        /** get refcount of item, always called while holding the bucket lock */
        void     (*hs_get)(struct cfs_hash *hs, struct hlist_node *hnode);
        /** release refcount of item */
        void     (*hs_put)(struct cfs_hash *hs, struct hlist_node *hnode);
        /** release refcount of item, always called while holding the bucket lock */
        void     (*hs_put_locked)(struct cfs_hash *hs,
                                  struct hlist_node *hnode);
        /** called just before @hnode is removed */
        void     (*hs_exit)(struct cfs_hash *hs, struct hlist_node *hnode);
};
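
/*
 * A hedged sketch of a minimal ops table for the hypothetical
 * struct my_obj above (64-bit key, per-object refcount); my_object,
 * my_get and my_put would follow the same container_of/refcount
 * pattern, and a real user must match the lifetime rules of its own
 * objects:
 *
 *      static unsigned
 *      my_hash(struct cfs_hash *hs, const void *key, unsigned mask)
 *      {
 *              return cfs_hash_u64_hash(*(const __u64 *)key, mask);
 *      }
 *
 *      static void *my_key(struct hlist_node *hnode)
 *      {
 *              return &container_of(hnode, struct my_obj,
 *                                   mo_hnode)->mo_key;
 *      }
 *
 *      static int my_keycmp(const void *key, struct hlist_node *hnode)
 *      {
 *              return *(const __u64 *)key ==
 *                     container_of(hnode, struct my_obj,
 *                                  mo_hnode)->mo_key;
 *      }
 *
 *      static struct cfs_hash_ops my_hash_ops = {
 *              .hs_hash        = my_hash,
 *              .hs_key         = my_key,
 *              .hs_keycmp      = my_keycmp,
 *              .hs_object      = my_object,
 *              .hs_get         = my_get,
 *              .hs_put         = my_put,
 *      };
 */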

/** total number of buckets in @hs */
#define CFS_HASH_NBKT(hs)       \
        (1U << ((hs)->hs_cur_bits - (hs)->hs_bkt_bits))

/** total number of buckets in @hs while rehashing */
#define CFS_HASH_RH_NBKT(hs)    \
        (1U << ((hs)->hs_rehash_bits - (hs)->hs_bkt_bits))

/** number of hlists in a bucket */
#define CFS_HASH_BKT_NHLIST(hs) (1U << (hs)->hs_bkt_bits)

/** total number of hlists in @hs */
#define CFS_HASH_NHLIST(hs)     (1U << (hs)->hs_cur_bits)

/** total number of hlists in @hs while rehashing */
#define CFS_HASH_RH_NHLIST(hs)  (1U << (hs)->hs_rehash_bits)
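
/*
 * For example (hypothetical sizing): with hs_cur_bits = 10 and
 * hs_bkt_bits = 3 there are CFS_HASH_NHLIST = 2^10 = 1024 hlist heads
 * in total, grouped into CFS_HASH_NBKT = 2^(10-3) = 128 buckets of
 * CFS_HASH_BKT_NHLIST = 2^3 = 8 hlists each.
 */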

static inline int
cfs_hash_with_no_lock(struct cfs_hash *hs)
{
        /* caller will serialize all operations for this hash-table */
        return (hs->hs_flags & CFS_HASH_NO_LOCK) != 0;
}

static inline int
cfs_hash_with_no_bktlock(struct cfs_hash *hs)
{
        /* no bucket lock, one single lock to protect the hash-table */
        return (hs->hs_flags & CFS_HASH_NO_BKTLOCK) != 0;
}

static inline int
cfs_hash_with_rw_bktlock(struct cfs_hash *hs)
{
        /* rwlock to protect hash bucket */
        return (hs->hs_flags & CFS_HASH_RW_BKTLOCK) != 0;
}

static inline int
cfs_hash_with_spin_bktlock(struct cfs_hash *hs)
{
        /* spinlock to protect hash bucket */
        return (hs->hs_flags & CFS_HASH_SPIN_BKTLOCK) != 0;
}

static inline int
cfs_hash_with_add_tail(struct cfs_hash *hs)
{
        return (hs->hs_flags & CFS_HASH_ADD_TAIL) != 0;
}

static inline int
cfs_hash_with_no_itemref(struct cfs_hash *hs)
{
        /* hash-table doesn't keep a refcount on items; an item can't be
         * removed from the hash unless its refcount is zero
         */
        return (hs->hs_flags & CFS_HASH_NO_ITEMREF) != 0;
}

static inline int
cfs_hash_with_bigname(struct cfs_hash *hs)
{
        return (hs->hs_flags & CFS_HASH_BIGNAME) != 0;
}

static inline int
cfs_hash_with_counter(struct cfs_hash *hs)
{
        return (hs->hs_flags & CFS_HASH_COUNTER) != 0;
}

static inline int
cfs_hash_with_rehash(struct cfs_hash *hs)
{
        return (hs->hs_flags & CFS_HASH_REHASH) != 0;
}

static inline int
cfs_hash_with_rehash_key(struct cfs_hash *hs)
{
        return (hs->hs_flags & CFS_HASH_REHASH_KEY) != 0;
}

static inline int
cfs_hash_with_shrink(struct cfs_hash *hs)
{
        return (hs->hs_flags & CFS_HASH_SHRINK) != 0;
}

static inline int
cfs_hash_with_assert_empty(struct cfs_hash *hs)
{
        return (hs->hs_flags & CFS_HASH_ASSERT_EMPTY) != 0;
}

static inline int
cfs_hash_with_depth(struct cfs_hash *hs)
{
        return (hs->hs_flags & CFS_HASH_DEPTH) != 0;
}

static inline int
cfs_hash_with_nblk_change(struct cfs_hash *hs)
{
        return (hs->hs_flags & CFS_HASH_NBLK_CHANGE) != 0;
}

static inline int
cfs_hash_is_exiting(struct cfs_hash *hs)
{
        /* cfs_hash_destroy is called */
        return hs->hs_exiting;
}

static inline int
cfs_hash_is_rehashing(struct cfs_hash *hs)
{
        /* rehash is launched */
        return hs->hs_rehash_bits != 0;
}

static inline int
cfs_hash_is_iterating(struct cfs_hash *hs)
{
        /* someone is calling cfs_hash_for_each_* */
        return hs->hs_iterating || hs->hs_iterators != 0;
}

static inline int
cfs_hash_bkt_size(struct cfs_hash *hs)
{
        return offsetof(struct cfs_hash_bucket, hsb_head[0]) +
               hs->hs_hops->hop_hhead_size(hs) * CFS_HASH_BKT_NHLIST(hs) +
               hs->hs_extra_bytes;
}

static inline unsigned
cfs_hash_id(struct cfs_hash *hs, const void *key, unsigned mask)
{
        return hs->hs_ops->hs_hash(hs, key, mask);
}

static inline void *
cfs_hash_key(struct cfs_hash *hs, struct hlist_node *hnode)
{
        return hs->hs_ops->hs_key(hnode);
}

static inline void
cfs_hash_keycpy(struct cfs_hash *hs, struct hlist_node *hnode, void *key)
{
        if (hs->hs_ops->hs_keycpy)
                hs->hs_ops->hs_keycpy(hnode, key);
}

/**
 * Returns 1 on a match.
 */
static inline int
cfs_hash_keycmp(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
{
        return hs->hs_ops->hs_keycmp(key, hnode);
}

static inline void *
cfs_hash_object(struct cfs_hash *hs, struct hlist_node *hnode)
{
        return hs->hs_ops->hs_object(hnode);
}

static inline void
cfs_hash_get(struct cfs_hash *hs, struct hlist_node *hnode)
{
        hs->hs_ops->hs_get(hs, hnode);
}

static inline void
cfs_hash_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
{
        hs->hs_ops->hs_put_locked(hs, hnode);
}

static inline void
cfs_hash_put(struct cfs_hash *hs, struct hlist_node *hnode)
{
        hs->hs_ops->hs_put(hs, hnode);
}

static inline void
cfs_hash_exit(struct cfs_hash *hs, struct hlist_node *hnode)
{
        if (hs->hs_ops->hs_exit)
                hs->hs_ops->hs_exit(hs, hnode);
}

static inline void cfs_hash_lock(struct cfs_hash *hs, int excl)
{
        hs->hs_lops->hs_lock(&hs->hs_lock, excl);
}

static inline void cfs_hash_unlock(struct cfs_hash *hs, int excl)
{
        hs->hs_lops->hs_unlock(&hs->hs_lock, excl);
}

static inline int cfs_hash_dec_and_lock(struct cfs_hash *hs,
                                        atomic_t *condition)
{
        LASSERT(cfs_hash_with_no_bktlock(hs));
        return atomic_dec_and_lock(condition, &hs->hs_lock.spin);
}
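
/*
 * Typical dec-and-lock usage (a hedged sketch; my_obj, its fields and
 * my_obj_free are hypothetical): drop what may be the last reference
 * and, if the count hits zero, remove the object with the table lock
 * already held:
 *
 *      if (cfs_hash_dec_and_lock(hs, &obj->mo_ref)) {
 *              hlist_del_init(&obj->mo_hnode);
 *              spin_unlock(&hs->hs_lock.spin);
 *              my_obj_free(obj);
 *      }
 */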

static inline void cfs_hash_bd_lock(struct cfs_hash *hs,
                                    struct cfs_hash_bd *bd, int excl)
{
        hs->hs_lops->hs_bkt_lock(&bd->bd_bucket->hsb_lock, excl);
}

static inline void cfs_hash_bd_unlock(struct cfs_hash *hs,
                                      struct cfs_hash_bd *bd, int excl)
{
        hs->hs_lops->hs_bkt_unlock(&bd->bd_bucket->hsb_lock, excl);
}

/**
 * operations on a cfs_hash bucket (bd: bucket descriptor);
 * these are normally used on hash-tables without rehash
 */
void cfs_hash_bd_get(struct cfs_hash *hs, const void *key,
                     struct cfs_hash_bd *bd);

static inline void
cfs_hash_bd_get_and_lock(struct cfs_hash *hs, const void *key,
                         struct cfs_hash_bd *bd, int excl)
{
        cfs_hash_bd_get(hs, key, bd);
        cfs_hash_bd_lock(hs, bd, excl);
}

static inline unsigned
cfs_hash_bd_index_get(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
        return bd->bd_offset | (bd->bd_bucket->hsb_index << hs->hs_bkt_bits);
}

static inline void
cfs_hash_bd_index_set(struct cfs_hash *hs, unsigned index,
                      struct cfs_hash_bd *bd)
{
        bd->bd_bucket = hs->hs_buckets[index >> hs->hs_bkt_bits];
        bd->bd_offset = index & (CFS_HASH_BKT_NHLIST(hs) - 1U);
}
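
/*
 * The global hlist index packs the bucket index into the high bits and
 * the in-bucket offset into the low hs_bkt_bits bits.  For example,
 * with hs_bkt_bits = 3, index 21 decodes to bucket 21 >> 3 = 2 and
 * offset 21 & 7 = 5; cfs_hash_bd_index_get() re-encodes the same 21.
 */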

static inline void *
cfs_hash_bd_extra_get(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
        return (void *)bd->bd_bucket +
               cfs_hash_bkt_size(hs) - hs->hs_extra_bytes;
}

static inline __u32
cfs_hash_bd_version_get(struct cfs_hash_bd *bd)
{
        /* the caller must hold cfs_hash_bd_lock */
        return bd->bd_bucket->hsb_version;
}

static inline __u32
cfs_hash_bd_count_get(struct cfs_hash_bd *bd)
{
        /* the caller must hold cfs_hash_bd_lock */
        return bd->bd_bucket->hsb_count;
}

static inline int
cfs_hash_bd_depmax_get(struct cfs_hash_bd *bd)
{
        return bd->bd_bucket->hsb_depmax;
}

static inline int
cfs_hash_bd_compare(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2)
{
        if (bd1->bd_bucket->hsb_index != bd2->bd_bucket->hsb_index)
                return bd1->bd_bucket->hsb_index - bd2->bd_bucket->hsb_index;

        if (bd1->bd_offset != bd2->bd_offset)
                return bd1->bd_offset - bd2->bd_offset;

        return 0;
}

void cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                            struct hlist_node *hnode);
void cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                            struct hlist_node *hnode);
void cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
                             struct cfs_hash_bd *bd_new,
                             struct hlist_node *hnode);

static inline int
cfs_hash_bd_dec_and_lock(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                         atomic_t *condition)
{
        LASSERT(cfs_hash_with_spin_bktlock(hs));
        return atomic_dec_and_lock(condition, &bd->bd_bucket->hsb_lock.spin);
}

static inline struct hlist_head *
cfs_hash_bd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
        return hs->hs_hops->hop_hhead(hs, bd);
}

struct hlist_node *
cfs_hash_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                          const void *key);
struct hlist_node *
cfs_hash_bd_peek_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                        const void *key);

/**
 * operations on a cfs_hash bucket (bd: bucket descriptor);
 * these are safe for hash-tables with rehash
 */
void cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key,
                          struct cfs_hash_bd *bds);
void cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                           int excl);
void cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                             int excl);

static inline void
cfs_hash_dual_bd_get_and_lock(struct cfs_hash *hs, const void *key,
                              struct cfs_hash_bd *bds, int excl)
{
        cfs_hash_dual_bd_get(hs, key, bds);
        cfs_hash_dual_bd_lock(hs, bds, excl);
}

struct hlist_node *
cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                               const void *key);
struct hlist_node *
cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                const void *key, struct hlist_node *hnode,
                                int insist_add);
struct hlist_node *
cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
                                const void *key, struct hlist_node *hnode);

/* Hash init/cleanup functions */
struct cfs_hash *
cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
                unsigned bkt_bits, unsigned extra_bytes,
                unsigned min_theta, unsigned max_theta,
                struct cfs_hash_ops *ops, unsigned flags);

struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs);
void cfs_hash_putref(struct cfs_hash *hs);

/* Hash addition functions */
void cfs_hash_add(struct cfs_hash *hs, const void *key,
                  struct hlist_node *hnode);
int cfs_hash_add_unique(struct cfs_hash *hs, const void *key,
                        struct hlist_node *hnode);
void *cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key,
                              struct hlist_node *hnode);

/* Hash deletion functions */
void *cfs_hash_del(struct cfs_hash *hs, const void *key,
                   struct hlist_node *hnode);
void *cfs_hash_del_key(struct cfs_hash *hs, const void *key);

/* Hash lookup/for_each functions */
#define CFS_HASH_LOOP_HOG       1024

typedef int (*cfs_hash_for_each_cb_t)(struct cfs_hash *hs,
                                      struct cfs_hash_bd *bd,
                                      struct hlist_node *node,
                                      void *data);
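
/*
 * A hedged iteration-callback sketch (my_count_cb is hypothetical,
 * matching the cfs_hash_for_each_cb_t signature above): count every
 * item in the table.
 *
 *      static int my_count_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 *                             struct hlist_node *hnode, void *data)
 *      {
 *              unsigned *count = data;
 *
 *              (*count)++;
 *              return 0;
 *      }
 *
 *      unsigned count = 0;
 *      cfs_hash_for_each(hs, my_count_cb, &count);
 */
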
void *
cfs_hash_lookup(struct cfs_hash *hs, const void *key);
void
cfs_hash_for_each(struct cfs_hash *hs, cfs_hash_for_each_cb_t, void *data);
void
cfs_hash_for_each_safe(struct cfs_hash *hs, cfs_hash_for_each_cb_t, void *data);
int
cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t,
                         void *data);
int
cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t,
                        void *data);
void
cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
                      cfs_hash_for_each_cb_t, void *data);
typedef int (*cfs_hash_cond_opt_cb_t)(void *obj, void *data);
void
cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t, void *data);

void
cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex,
                        cfs_hash_for_each_cb_t, void *data);
int  cfs_hash_is_empty(struct cfs_hash *hs);
__u64 cfs_hash_size_get(struct cfs_hash *hs);

/*
 * Rehash - Theta is calculated to be the average chained
 * hash depth assuming a perfectly uniform hash function.
 */
void cfs_hash_rehash_cancel_locked(struct cfs_hash *hs);
void cfs_hash_rehash_cancel(struct cfs_hash *hs);
int  cfs_hash_rehash(struct cfs_hash *hs, int do_rehash);
void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key,
                         void *new_key, struct hlist_node *hnode);

#if CFS_HASH_DEBUG_LEVEL > CFS_HASH_DEBUG_1
/* Validate hnode references the correct key */
static inline void
cfs_hash_key_validate(struct cfs_hash *hs, const void *key,
                      struct hlist_node *hnode)
{
        LASSERT(cfs_hash_keycmp(hs, key, hnode));
}

/* Validate hnode is in the correct bucket */
static inline void
cfs_hash_bucket_validate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                         struct hlist_node *hnode)
{
        struct cfs_hash_bd bds[2];

        cfs_hash_dual_bd_get(hs, cfs_hash_key(hs, hnode), bds);
        LASSERT(bds[0].bd_bucket == bd->bd_bucket ||
                bds[1].bd_bucket == bd->bd_bucket);
}

#else /* CFS_HASH_DEBUG_LEVEL > CFS_HASH_DEBUG_1 */

static inline void
cfs_hash_key_validate(struct cfs_hash *hs, const void *key,
                      struct hlist_node *hnode) {}

static inline void
cfs_hash_bucket_validate(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                         struct hlist_node *hnode) {}

#endif /* CFS_HASH_DEBUG_LEVEL */

#define CFS_HASH_THETA_BITS     10
#define CFS_HASH_MIN_THETA      (1U << (CFS_HASH_THETA_BITS - 1))
#define CFS_HASH_MAX_THETA      (1U << (CFS_HASH_THETA_BITS + 1))

/* Return integer component of theta */
static inline int __cfs_hash_theta_int(int theta)
{
        return (theta >> CFS_HASH_THETA_BITS);
}

/* Return a fractional value between 0 and 999 */
static inline int __cfs_hash_theta_frac(int theta)
{
        return ((theta * 1000) >> CFS_HASH_THETA_BITS) -
               (__cfs_hash_theta_int(theta) * 1000);
}

static inline int __cfs_hash_theta(struct cfs_hash *hs)
{
        return (atomic_read(&hs->hs_count) <<
                CFS_HASH_THETA_BITS) >> hs->hs_cur_bits;
}
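
/*
 * Worked example: theta is a CFS_HASH_THETA_BITS (10-bit) fixed-point
 * value.  With 4096 items and hs_cur_bits = 10 (1024 hlists),
 * __cfs_hash_theta() returns (4096 << 10) >> 10 = 4096, i.e. an
 * average chain depth of __cfs_hash_theta_int(4096) = 4 with a
 * fractional part of __cfs_hash_theta_frac(4096) = 0, read as 4.000.
 */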

static inline void
__cfs_hash_set_theta(struct cfs_hash *hs, int min, int max)
{
        LASSERT(min < max);
        hs->hs_min_theta = (__u16)min;
        hs->hs_max_theta = (__u16)max;
}

/* Generic debug formatting routines mainly for proc handler */
struct seq_file;
void cfs_hash_debug_header(struct seq_file *m);
void cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m);

/*
 * Generic djb2 hash algorithm for character arrays.
 */
static inline unsigned
cfs_hash_djb2_hash(const void *key, size_t size, unsigned mask)
{
        unsigned i, hash = 5381;

        LASSERT(key != NULL);

        for (i = 0; i < size; i++)
                hash = hash * 33 + ((const char *)key)[i];

        return (hash & mask);
}
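
/*
 * Typically used to implement the hs_hash callback for string keys,
 * e.g. (a hypothetical callback):
 *
 *      static unsigned
 *      my_str_hash(struct cfs_hash *hs, const void *key, unsigned mask)
 *      {
 *              return cfs_hash_djb2_hash(key, strlen(key), mask);
 *      }
 */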

/*
 * Generic u32 hash algorithm.
 */
static inline unsigned
cfs_hash_u32_hash(const __u32 key, unsigned mask)
{
        return ((key * CFS_GOLDEN_RATIO_PRIME_32) & mask);
}

/*
 * Generic u64 hash algorithm.
 */
static inline unsigned
cfs_hash_u64_hash(const __u64 key, unsigned mask)
{
        return ((unsigned)(key * CFS_GOLDEN_RATIO_PRIME_64) & mask);
}

/** iterate over all buckets in @bds (array of struct cfs_hash_bd) */
#define cfs_hash_for_each_bd(bds, n, i) \
        for (i = 0; i < n && (bds)[i].bd_bucket != NULL; i++)

/** iterate over all buckets of @hs */
#define cfs_hash_for_each_bucket(hs, bd, pos)                   \
        for (pos = 0;                                           \
             pos < CFS_HASH_NBKT(hs) &&                         \
             ((bd)->bd_bucket = (hs)->hs_buckets[pos]) != NULL; pos++)

/** iterate over all hlists of bucket @bd */
#define cfs_hash_bd_for_each_hlist(hs, bd, hlist)               \
        for ((bd)->bd_offset = 0;                               \
             (bd)->bd_offset < CFS_HASH_BKT_NHLIST(hs) &&       \
             (hlist = cfs_hash_bd_hhead(hs, bd)) != NULL;       \
             (bd)->bd_offset++)
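
/*
 * Example usage (a hedged sketch; locking is the caller's
 * responsibility): walk every hlist of every bucket.
 *
 *      struct cfs_hash_bd bd;
 *      struct hlist_head *hhead;
 *      unsigned pos;
 *
 *      cfs_hash_for_each_bucket(hs, &bd, pos) {
 *              cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
 *                      ... walk hhead, e.g. with hlist_for_each_entry() ...
 *              }
 *      }
 */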

#endif /* __LIBCFS_HASH_H__ */