linux/drivers/staging/lustre/lnet/libcfs/hash.c
   1/*
   2 * GPL HEADER START
   3 *
   4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License version 2 only,
   8 * as published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it will be useful, but
  11 * WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  13 * General Public License version 2 for more details (a copy is included
  14 * in the LICENSE file that accompanied this code).
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * version 2 along with this program; If not, see
  18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
  19 *
  20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  21 * CA 95054 USA or visit www.sun.com if you need additional information or
  22 * have any questions.
  23 *
  24 * GPL HEADER END
  25 */
  26/*
  27 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
  28 * Use is subject to license terms.
  29 *
  30 * Copyright (c) 2011, 2012, Intel Corporation.
  31 */
  32/*
  33 * This file is part of Lustre, http://www.lustre.org/
  34 * Lustre is a trademark of Sun Microsystems, Inc.
  35 *
  36 * libcfs/libcfs/hash.c
  37 *
  38 * Implement a hash class for hashing in the Lustre system.
  39 *
  40 * Author: YuZhangyong <yzy@clusterfs.com>
  41 *
  42 * 2008-08-15: Brian Behlendorf <behlendorf1@llnl.gov>
  43 * - Simplified API and improved documentation
  44 * - Added per-hash feature flags:
  45 *   * CFS_HASH_DEBUG additional validation
  46 *   * CFS_HASH_REHASH dynamic rehashing
  47 * - Added per-hash statistics
  48 * - General performance enhancements
  49 *
  50 * 2009-07-31: Liang Zhen <zhen.liang@sun.com>
  51 * - moved everything to libcfs
  52 * - disallow cur_bits != max_bits unless CFS_HASH_REHASH is set
  53 * - ignore hs_rwlock unless CFS_HASH_REHASH is set
  54 * - buckets are allocated one by one (instead of as contiguous memory)
  55 *   to avoid unnecessary cacheline conflicts
  56 *
  57 * 2010-03-01: Liang Zhen <zhen.liang@sun.com>
  58 * - "bucket" is a group of hlist_head now, user can specify bucket size
  59 *   by bkt_bits of cfs_hash_create(), all hlist_heads in a bucket share
  60 *   one lock for reducing memory overhead.
  61 *
  62 * - support lockless hash; the caller takes care of locking:
  63 *   this avoids lock overhead for hash tables that are already protected
  64 *   by locking in the caller for another reason
  65 *
  66 * - support both spinlock and rwlock as the bucket lock:
  67 *   the overhead of spinlock contention is lower than the read/write
  68 *   contention of an rwlock, so using a spinlock to serialize operations
  69 *   on a bucket is more reasonable for frequently changed hash tables
  70 *
  71 * - support single-lock mode:
  72 *   one lock protects all hash operations, avoiding the overhead of
  73 *   multiple locks when the hash table is always small
  74 *
  75 * - removed a lot of unnecessary addref & decref on hash elements:
  76 *   addref & decref are atomic operations in many use-cases, which
  77 *   makes them expensive.
  78 *
  79 * - support non-blocking cfs_hash_add() and cfs_hash_findadd():
  80 *   some Lustre use-cases require these functions to be strictly
  81 *   non-blocking, so we schedule any required rehash on a different
  82 *   thread in those cases.
  83 *
  84 * - safer rehash on large hash tables
  85 *   In the old implementation, the rehash function would exclusively
  86 *   lock the hash table and finish the rehash in one batch; this is
  87 *   dangerous on an SMP system because rehashing millions of elements
  88 *   can take a long time.  The new rehash can release the lock and
  89 *   relax the CPU in the middle of a rehash, so another thread can
  90 *   safely search/change the hash table even while it is rehashing.
  91 *
  92 * - support two different refcount modes
  93 *   . hash table has refcount on element
  94 *   . hash table doesn't change refcount on adding/removing element
  95 *
  96 * - support long name hash table (for param-tree)
  97 *
  98 * - fix a bug in cfs_hash_rehash_key:
  99 *   in the old implementation, cfs_hash_rehash_key could corrupt the
 100 *   hash table because @key was overwritten without any protection.
 101 *   Now the user must define hs_keycpy for rehash-enabled hash tables;
 102 *   cfs_hash_rehash_key overwrites the hash key inside the lock by
 103 *   calling hs_keycpy.
 104 *
 105 * - better hash iteration:
 106 *   Now we support both locked and lockless iteration of the hash table.
 107 *   Also, the user can break the iteration by returning 1 in the callback.
 108 */
 109#include <linux/seq_file.h>
 110#include <linux/log2.h>
 111
 112#include "../../include/linux/libcfs/libcfs.h"
 113
 114#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
 115static unsigned int warn_on_depth = 8;
 116module_param(warn_on_depth, uint, 0644);
 117MODULE_PARM_DESC(warn_on_depth, "warning when hash depth is high.");
 118#endif
 119
 120struct cfs_wi_sched *cfs_sched_rehash;
 121
 122static inline void
 123cfs_hash_nl_lock(union cfs_hash_lock *lock, int exclusive) {}
 124
 125static inline void
 126cfs_hash_nl_unlock(union cfs_hash_lock *lock, int exclusive) {}
 127
 128static inline void
 129cfs_hash_spin_lock(union cfs_hash_lock *lock, int exclusive)
 130        __acquires(&lock->spin)
 131{
 132        spin_lock(&lock->spin);
 133}
 134
 135static inline void
 136cfs_hash_spin_unlock(union cfs_hash_lock *lock, int exclusive)
 137        __releases(&lock->spin)
 138{
 139        spin_unlock(&lock->spin);
 140}
 141
 142static inline void
 143cfs_hash_rw_lock(union cfs_hash_lock *lock, int exclusive)
 144        __acquires(&lock->rw)
 145{
 146        if (!exclusive)
 147                read_lock(&lock->rw);
 148        else
 149                write_lock(&lock->rw);
 150}
 151
 152static inline void
 153cfs_hash_rw_unlock(union cfs_hash_lock *lock, int exclusive)
 154        __releases(&lock->rw)
 155{
 156        if (!exclusive)
 157                read_unlock(&lock->rw);
 158        else
 159                write_unlock(&lock->rw);
 160}
 161
 162/** No lock hash */
 163static struct cfs_hash_lock_ops cfs_hash_nl_lops = {
 164        .hs_lock        = cfs_hash_nl_lock,
 165        .hs_unlock      = cfs_hash_nl_unlock,
 166        .hs_bkt_lock    = cfs_hash_nl_lock,
 167        .hs_bkt_unlock  = cfs_hash_nl_unlock,
 168};
 169
 170/** no bucket lock, one spinlock to protect everything */
 171static struct cfs_hash_lock_ops cfs_hash_nbl_lops = {
 172        .hs_lock        = cfs_hash_spin_lock,
 173        .hs_unlock      = cfs_hash_spin_unlock,
 174        .hs_bkt_lock    = cfs_hash_nl_lock,
 175        .hs_bkt_unlock  = cfs_hash_nl_unlock,
 176};
 177
 178/** spin bucket lock, rehash is enabled */
 179static struct cfs_hash_lock_ops cfs_hash_bkt_spin_lops = {
 180        .hs_lock        = cfs_hash_rw_lock,
 181        .hs_unlock      = cfs_hash_rw_unlock,
 182        .hs_bkt_lock    = cfs_hash_spin_lock,
 183        .hs_bkt_unlock  = cfs_hash_spin_unlock,
 184};
 185
 186/** rw bucket lock, rehash is enabled */
 187static struct cfs_hash_lock_ops cfs_hash_bkt_rw_lops = {
 188        .hs_lock        = cfs_hash_rw_lock,
 189        .hs_unlock      = cfs_hash_rw_unlock,
 190        .hs_bkt_lock    = cfs_hash_rw_lock,
 191        .hs_bkt_unlock  = cfs_hash_rw_unlock,
 192};
 193
 194/** spin bucket lock, rehash is disabled */
 195static struct cfs_hash_lock_ops cfs_hash_nr_bkt_spin_lops = {
 196        .hs_lock        = cfs_hash_nl_lock,
 197        .hs_unlock      = cfs_hash_nl_unlock,
 198        .hs_bkt_lock    = cfs_hash_spin_lock,
 199        .hs_bkt_unlock  = cfs_hash_spin_unlock,
 200};
 201
 202/** rw bucket lock, rehash is disabled */
 203static struct cfs_hash_lock_ops cfs_hash_nr_bkt_rw_lops = {
 204        .hs_lock        = cfs_hash_nl_lock,
 205        .hs_unlock      = cfs_hash_nl_unlock,
 206        .hs_bkt_lock    = cfs_hash_rw_lock,
 207        .hs_bkt_unlock  = cfs_hash_rw_unlock,
 208};
 209
 210static void
 211cfs_hash_lock_setup(struct cfs_hash *hs)
 212{
 213        if (cfs_hash_with_no_lock(hs)) {
 214                hs->hs_lops = &cfs_hash_nl_lops;
 215
 216        } else if (cfs_hash_with_no_bktlock(hs)) {
 217                hs->hs_lops = &cfs_hash_nbl_lops;
 218                spin_lock_init(&hs->hs_lock.spin);
 219
 220        } else if (cfs_hash_with_rehash(hs)) {
 221                rwlock_init(&hs->hs_lock.rw);
 222
 223                if (cfs_hash_with_rw_bktlock(hs))
 224                        hs->hs_lops = &cfs_hash_bkt_rw_lops;
 225                else if (cfs_hash_with_spin_bktlock(hs))
 226                        hs->hs_lops = &cfs_hash_bkt_spin_lops;
 227                else
 228                        LBUG();
 229        } else {
 230                if (cfs_hash_with_rw_bktlock(hs))
 231                        hs->hs_lops = &cfs_hash_nr_bkt_rw_lops;
 232                else if (cfs_hash_with_spin_bktlock(hs))
 233                        hs->hs_lops = &cfs_hash_nr_bkt_spin_lops;
 234                else
 235                        LBUG();
 236        }
 237}
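
/*
 * Summary of the mapping chosen above (the CFS_HASH_NO_BKTLOCK,
 * CFS_HASH_RW_BKTLOCK and CFS_HASH_SPIN_BKTLOCK flag names are assumed
 * from the libcfs hash header; they are not spelled out in this file):
 *
 *   CFS_HASH_NO_LOCK                  -> cfs_hash_nl_lops (no locking at all)
 *   CFS_HASH_NO_BKTLOCK               -> cfs_hash_nbl_lops (one spinlock
 *                                        protects the whole table)
 *   rehash + CFS_HASH_RW_BKTLOCK      -> cfs_hash_bkt_rw_lops
 *   rehash + CFS_HASH_SPIN_BKTLOCK    -> cfs_hash_bkt_spin_lops
 *   no rehash + CFS_HASH_RW_BKTLOCK   -> cfs_hash_nr_bkt_rw_lops
 *   no rehash + CFS_HASH_SPIN_BKTLOCK -> cfs_hash_nr_bkt_spin_lops
 *
 * Only rehash-enabled tables need the table-level rwlock; otherwise the
 * table-level lock operations are no-ops.
 */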
 238
 239/**
 240 * Simple hash head without depth tracking
 241 * new element is always added to head of hlist
 242 */
 243struct cfs_hash_head {
 244        struct hlist_head       hh_head;        /**< entries list */
 245};
 246
 247static int
 248cfs_hash_hh_hhead_size(struct cfs_hash *hs)
 249{
 250        return sizeof(struct cfs_hash_head);
 251}
 252
 253static struct hlist_head *
 254cfs_hash_hh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
 255{
 256        struct cfs_hash_head *head;
 257
 258        head = (struct cfs_hash_head *)&bd->bd_bucket->hsb_head[0];
 259        return &head[bd->bd_offset].hh_head;
 260}
 261
 262static int
 263cfs_hash_hh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 264                      struct hlist_node *hnode)
 265{
 266        hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd));
 267        return -1; /* unknown depth */
 268}
 269
 270static int
 271cfs_hash_hh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 272                      struct hlist_node *hnode)
 273{
 274        hlist_del_init(hnode);
 275        return -1; /* unknown depth */
 276}
 277
 278/**
 279 * Simple hash head with depth tracking
 280 * new element is always added to head of hlist
 281 */
 282struct cfs_hash_head_dep {
 283        struct hlist_head       hd_head;        /**< entries list */
 284        unsigned int            hd_depth;       /**< list length */
 285};
 286
 287static int
 288cfs_hash_hd_hhead_size(struct cfs_hash *hs)
 289{
 290        return sizeof(struct cfs_hash_head_dep);
 291}
 292
 293static struct hlist_head *
 294cfs_hash_hd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
 295{
 296        struct cfs_hash_head_dep   *head;
 297
 298        head = (struct cfs_hash_head_dep *)&bd->bd_bucket->hsb_head[0];
 299        return &head[bd->bd_offset].hd_head;
 300}
 301
 302static int
 303cfs_hash_hd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 304                      struct hlist_node *hnode)
 305{
 306        struct cfs_hash_head_dep *hh;
 307
 308        hh = container_of(cfs_hash_hd_hhead(hs, bd),
 309                          struct cfs_hash_head_dep, hd_head);
 310        hlist_add_head(hnode, &hh->hd_head);
 311        return ++hh->hd_depth;
 312}
 313
 314static int
 315cfs_hash_hd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 316                      struct hlist_node *hnode)
 317{
 318        struct cfs_hash_head_dep *hh;
 319
 320        hh = container_of(cfs_hash_hd_hhead(hs, bd),
 321                          struct cfs_hash_head_dep, hd_head);
 322        hlist_del_init(hnode);
 323        return --hh->hd_depth;
 324}
 325
 326/**
 327 * double links hash head without depth tracking
 328 * new element is always added to tail of hlist
 329 */
 330struct cfs_hash_dhead {
 331        struct hlist_head       dh_head;        /**< entries list */
 332        struct hlist_node       *dh_tail;       /**< the last entry */
 333};
 334
 335static int
 336cfs_hash_dh_hhead_size(struct cfs_hash *hs)
 337{
 338        return sizeof(struct cfs_hash_dhead);
 339}
 340
 341static struct hlist_head *
 342cfs_hash_dh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
 343{
 344        struct cfs_hash_dhead *head;
 345
 346        head = (struct cfs_hash_dhead *)&bd->bd_bucket->hsb_head[0];
 347        return &head[bd->bd_offset].dh_head;
 348}
 349
 350static int
 351cfs_hash_dh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 352                      struct hlist_node *hnode)
 353{
 354        struct cfs_hash_dhead *dh;
 355
 356        dh = container_of(cfs_hash_dh_hhead(hs, bd),
 357                          struct cfs_hash_dhead, dh_head);
 358        if (dh->dh_tail) /* not empty */
 359                hlist_add_behind(hnode, dh->dh_tail);
 360        else /* empty list */
 361                hlist_add_head(hnode, &dh->dh_head);
 362        dh->dh_tail = hnode;
 363        return -1; /* unknown depth */
 364}
 365
 366static int
 367cfs_hash_dh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 368                      struct hlist_node *hnd)
 369{
 370        struct cfs_hash_dhead *dh;
 371
 372        dh = container_of(cfs_hash_dh_hhead(hs, bd),
 373                          struct cfs_hash_dhead, dh_head);
 374        if (!hnd->next) { /* it's the tail */
 375                dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL :
 376                              container_of(hnd->pprev, struct hlist_node, next);
 377        }
 378        hlist_del_init(hnd);
 379        return -1; /* unknown depth */
 380}
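
/*
 * Note on the deletion above: when the node being removed is the list tail,
 * the new tail is recovered from hnd->pprev.  pprev points at the 'next'
 * field of the previous node (or at the list head's 'first' field), so
 * container_of(hnd->pprev, struct hlist_node, next) yields the previous
 * node, and the head case is detected by comparing against &dh_head.first.
 * cfs_hash_dd_hnode_del() below uses the same technique.
 */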
 381
 382/**
 383 * double links hash head with depth tracking
 384 * new element is always added to tail of hlist
 385 */
 386struct cfs_hash_dhead_dep {
 387        struct hlist_head       dd_head;        /**< entries list */
 388        struct hlist_node       *dd_tail;       /**< the last entry */
 389        unsigned int            dd_depth;       /**< list length */
 390};
 391
 392static int
 393cfs_hash_dd_hhead_size(struct cfs_hash *hs)
 394{
 395        return sizeof(struct cfs_hash_dhead_dep);
 396}
 397
 398static struct hlist_head *
 399cfs_hash_dd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
 400{
 401        struct cfs_hash_dhead_dep *head;
 402
 403        head = (struct cfs_hash_dhead_dep *)&bd->bd_bucket->hsb_head[0];
 404        return &head[bd->bd_offset].dd_head;
 405}
 406
 407static int
 408cfs_hash_dd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 409                      struct hlist_node *hnode)
 410{
 411        struct cfs_hash_dhead_dep *dh;
 412
 413        dh = container_of(cfs_hash_dd_hhead(hs, bd),
 414                          struct cfs_hash_dhead_dep, dd_head);
 415        if (dh->dd_tail) /* not empty */
 416                hlist_add_behind(hnode, dh->dd_tail);
 417        else /* empty list */
 418                hlist_add_head(hnode, &dh->dd_head);
 419        dh->dd_tail = hnode;
 420        return ++dh->dd_depth;
 421}
 422
 423static int
 424cfs_hash_dd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 425                      struct hlist_node *hnd)
 426{
 427        struct cfs_hash_dhead_dep *dh;
 428
 429        dh = container_of(cfs_hash_dd_hhead(hs, bd),
 430                          struct cfs_hash_dhead_dep, dd_head);
 431        if (!hnd->next) { /* it's the tail */
 432                dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL :
 433                              container_of(hnd->pprev, struct hlist_node, next);
 434        }
 435        hlist_del_init(hnd);
 436        return --dh->dd_depth;
 437}
 438
 439static struct cfs_hash_hlist_ops cfs_hash_hh_hops = {
 440        .hop_hhead      = cfs_hash_hh_hhead,
 441        .hop_hhead_size = cfs_hash_hh_hhead_size,
 442        .hop_hnode_add  = cfs_hash_hh_hnode_add,
 443        .hop_hnode_del  = cfs_hash_hh_hnode_del,
 444};
 445
 446static struct cfs_hash_hlist_ops cfs_hash_hd_hops = {
 447        .hop_hhead      = cfs_hash_hd_hhead,
 448        .hop_hhead_size = cfs_hash_hd_hhead_size,
 449        .hop_hnode_add  = cfs_hash_hd_hnode_add,
 450        .hop_hnode_del  = cfs_hash_hd_hnode_del,
 451};
 452
 453static struct cfs_hash_hlist_ops cfs_hash_dh_hops = {
 454        .hop_hhead      = cfs_hash_dh_hhead,
 455        .hop_hhead_size = cfs_hash_dh_hhead_size,
 456        .hop_hnode_add  = cfs_hash_dh_hnode_add,
 457        .hop_hnode_del  = cfs_hash_dh_hnode_del,
 458};
 459
 460static struct cfs_hash_hlist_ops cfs_hash_dd_hops = {
 461        .hop_hhead      = cfs_hash_dd_hhead,
 462        .hop_hhead_size = cfs_hash_dd_hhead_size,
 463        .hop_hnode_add  = cfs_hash_dd_hnode_add,
 464        .hop_hnode_del  = cfs_hash_dd_hnode_del,
 465};
 466
 467static void
 468cfs_hash_hlist_setup(struct cfs_hash *hs)
 469{
 470        if (cfs_hash_with_add_tail(hs)) {
 471                hs->hs_hops = cfs_hash_with_depth(hs) ?
 472                              &cfs_hash_dd_hops : &cfs_hash_dh_hops;
 473        } else {
 474                hs->hs_hops = cfs_hash_with_depth(hs) ?
 475                              &cfs_hash_hd_hops : &cfs_hash_hh_hops;
 476        }
 477}
 478
 479static void
 480cfs_hash_bd_from_key(struct cfs_hash *hs, struct cfs_hash_bucket **bkts,
 481                     unsigned int bits, const void *key, struct cfs_hash_bd *bd)
 482{
 483        unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1);
 484
 485        LASSERT(bits == hs->hs_cur_bits || bits == hs->hs_rehash_bits);
 486
 487        bd->bd_bucket = bkts[index & ((1U << (bits - hs->hs_bkt_bits)) - 1)];
 488        bd->bd_offset = index >> (bits - hs->hs_bkt_bits);
 489}
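
/*
 * Worked example (illustrative numbers only): with bits = 10 and
 * hs->hs_bkt_bits = 3 there are 1 << 10 = 1024 hlist heads spread over
 * 1 << (10 - 3) = 128 buckets of 1 << 3 = 8 heads each.  A hashed index
 * of 683 (0x2ab) gives bd_bucket = bkts[683 & 127] = bkts[43] and
 * bd_offset = 683 >> 7 = 5, i.e. the low (bits - bkt_bits) bits select
 * the bucket and the high bkt_bits bits select the hlist head within it.
 */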
 490
 491void
 492cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd)
 493{
 494        /* NB: caller should hold hs->hs_lock if REHASH is set */
 495        if (likely(!hs->hs_rehash_buckets)) {
 496                cfs_hash_bd_from_key(hs, hs->hs_buckets,
 497                                     hs->hs_cur_bits, key, bd);
 498        } else {
 499                LASSERT(hs->hs_rehash_bits != 0);
 500                cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
 501                                     hs->hs_rehash_bits, key, bd);
 502        }
 503}
 504EXPORT_SYMBOL(cfs_hash_bd_get);
 505
 506static inline void
 507cfs_hash_bd_dep_record(struct cfs_hash *hs, struct cfs_hash_bd *bd, int dep_cur)
 508{
 509        if (likely(dep_cur <= bd->bd_bucket->hsb_depmax))
 510                return;
 511
 512        bd->bd_bucket->hsb_depmax = dep_cur;
 513# if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
 514        if (likely(warn_on_depth == 0 ||
 515                   max(warn_on_depth, hs->hs_dep_max) >= dep_cur))
 516                return;
 517
 518        spin_lock(&hs->hs_dep_lock);
 519        hs->hs_dep_max  = dep_cur;
 520        hs->hs_dep_bkt  = bd->bd_bucket->hsb_index;
 521        hs->hs_dep_off  = bd->bd_offset;
 522        hs->hs_dep_bits = hs->hs_cur_bits;
 523        spin_unlock(&hs->hs_dep_lock);
 524
 525        cfs_wi_schedule(cfs_sched_rehash, &hs->hs_dep_wi);
 526# endif
 527}
 528
 529void
 530cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 531                       struct hlist_node *hnode)
 532{
 533        int rc;
 534
 535        rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode);
 536        cfs_hash_bd_dep_record(hs, bd, rc);
 537        bd->bd_bucket->hsb_version++;
 538        if (unlikely(bd->bd_bucket->hsb_version == 0))
 539                bd->bd_bucket->hsb_version++;
 540        bd->bd_bucket->hsb_count++;
 541
 542        if (cfs_hash_with_counter(hs))
 543                atomic_inc(&hs->hs_count);
 544        if (!cfs_hash_with_no_itemref(hs))
 545                cfs_hash_get(hs, hnode);
 546}
 547EXPORT_SYMBOL(cfs_hash_bd_add_locked);
 548
 549void
 550cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 551                       struct hlist_node *hnode)
 552{
 553        hs->hs_hops->hop_hnode_del(hs, bd, hnode);
 554
 555        LASSERT(bd->bd_bucket->hsb_count > 0);
 556        bd->bd_bucket->hsb_count--;
 557        bd->bd_bucket->hsb_version++;
 558        if (unlikely(bd->bd_bucket->hsb_version == 0))
 559                bd->bd_bucket->hsb_version++;
 560
 561        if (cfs_hash_with_counter(hs)) {
 562                LASSERT(atomic_read(&hs->hs_count) > 0);
 563                atomic_dec(&hs->hs_count);
 564        }
 565        if (!cfs_hash_with_no_itemref(hs))
 566                cfs_hash_put_locked(hs, hnode);
 567}
 568EXPORT_SYMBOL(cfs_hash_bd_del_locked);
 569
 570void
 571cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
 572                        struct cfs_hash_bd *bd_new, struct hlist_node *hnode)
 573{
 574        struct cfs_hash_bucket *obkt = bd_old->bd_bucket;
 575        struct cfs_hash_bucket *nbkt = bd_new->bd_bucket;
 576        int rc;
 577
 578        if (cfs_hash_bd_compare(bd_old, bd_new) == 0)
 579                return;
 580
 581        /* call hop_hnode_add/del directly to avoid the atomic & refcount
 582         * ops in cfs_hash_bd_add/del_locked
 583         */
 584        hs->hs_hops->hop_hnode_del(hs, bd_old, hnode);
 585        rc = hs->hs_hops->hop_hnode_add(hs, bd_new, hnode);
 586        cfs_hash_bd_dep_record(hs, bd_new, rc);
 587
 588        LASSERT(obkt->hsb_count > 0);
 589        obkt->hsb_count--;
 590        obkt->hsb_version++;
 591        if (unlikely(obkt->hsb_version == 0))
 592                obkt->hsb_version++;
 593        nbkt->hsb_count++;
 594        nbkt->hsb_version++;
 595        if (unlikely(nbkt->hsb_version == 0))
 596                nbkt->hsb_version++;
 597}
 598
 599enum {
 600        /** always set, for sanity (avoid ZERO intent) */
 601        CFS_HS_LOOKUP_MASK_FIND = BIT(0),
 602        /** return entry with a ref */
 603        CFS_HS_LOOKUP_MASK_REF  = BIT(1),
 604        /** add entry if not existing */
 605        CFS_HS_LOOKUP_MASK_ADD  = BIT(2),
 606        /** delete entry, ignore other masks */
 607        CFS_HS_LOOKUP_MASK_DEL  = BIT(3),
 608};
 609
 610enum cfs_hash_lookup_intent {
 611        /** return item w/o refcount */
 612        CFS_HS_LOOKUP_IT_PEEK    = CFS_HS_LOOKUP_MASK_FIND,
 613        /** return item with refcount */
 614        CFS_HS_LOOKUP_IT_FIND    = (CFS_HS_LOOKUP_MASK_FIND |
 615                                    CFS_HS_LOOKUP_MASK_REF),
 616        /** return item w/o refcount if existed, otherwise add */
 617        CFS_HS_LOOKUP_IT_ADD     = (CFS_HS_LOOKUP_MASK_FIND |
 618                                    CFS_HS_LOOKUP_MASK_ADD),
 619        /** return item with refcount if existed, otherwise add */
 620        CFS_HS_LOOKUP_IT_FINDADD = (CFS_HS_LOOKUP_IT_FIND |
 621                                    CFS_HS_LOOKUP_MASK_ADD),
 622        /** delete if existed */
 623        CFS_HS_LOOKUP_IT_FINDDEL = (CFS_HS_LOOKUP_MASK_FIND |
 624                                    CFS_HS_LOOKUP_MASK_DEL)
 625};
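
/*
 * The intents above are simply mask compositions, e.g.
 * CFS_HS_LOOKUP_IT_FINDADD == FIND | REF | ADD, so callers can also build
 * an intent on the fly; cfs_hash_multi_bd_findadd_locked() below does this
 * with "(!noref * CFS_HS_LOOKUP_MASK_REF) | CFS_HS_LOOKUP_IT_PEEK", which
 * evaluates to IT_FIND when a reference is wanted and to IT_PEEK otherwise.
 */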
 626
 627static struct hlist_node *
 628cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 629                          const void *key, struct hlist_node *hnode,
 630                          enum cfs_hash_lookup_intent intent)
 631
 632{
 633        struct hlist_head *hhead = cfs_hash_bd_hhead(hs, bd);
 634        struct hlist_node *ehnode;
 635        struct hlist_node *match;
 636        int intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0;
 637
 638        /* with this function, we can avoid a lot of useless refcount ops,
 639         * which are expensive atomic operations most of the time.
 640         */
 641        match = intent_add ? NULL : hnode;
 642        hlist_for_each(ehnode, hhead) {
 643                if (!cfs_hash_keycmp(hs, key, ehnode))
 644                        continue;
 645
 646                if (match && match != ehnode) /* can't match */
 647                        continue;
 648
 649                /* match and ... */
 650                if ((intent & CFS_HS_LOOKUP_MASK_DEL) != 0) {
 651                        cfs_hash_bd_del_locked(hs, bd, ehnode);
 652                        return ehnode;
 653                }
 654
 655                /* caller wants refcount? */
 656                if ((intent & CFS_HS_LOOKUP_MASK_REF) != 0)
 657                        cfs_hash_get(hs, ehnode);
 658                return ehnode;
 659        }
 660        /* no matching item */
 661        if (!intent_add)
 662                return NULL;
 663
 664        LASSERT(hnode);
 665        cfs_hash_bd_add_locked(hs, bd, hnode);
 666        return hnode;
 667}
 668
 669struct hlist_node *
 670cfs_hash_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 671                          const void *key)
 672{
 673        return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
 674                                         CFS_HS_LOOKUP_IT_FIND);
 675}
 676EXPORT_SYMBOL(cfs_hash_bd_lookup_locked);
 677
 678struct hlist_node *
 679cfs_hash_bd_peek_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
 680                        const void *key)
 681{
 682        return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
 683                                         CFS_HS_LOOKUP_IT_PEEK);
 684}
 685EXPORT_SYMBOL(cfs_hash_bd_peek_locked);
 686
 687static void
 688cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
 689                       unsigned n, int excl)
 690{
 691        struct cfs_hash_bucket *prev = NULL;
 692        int i;
 693
 694        /**
 695         * bds must be ordered by ascending bd->bd_bucket->hsb_index.
 696         * NB: several bds may point to the same bucket while having
 697         * different bd::bd_offset, so take care to avoid deadlock.
 698         */
 699        cfs_hash_for_each_bd(bds, n, i) {
 700                if (prev == bds[i].bd_bucket)
 701                        continue;
 702
 703                LASSERT(!prev || prev->hsb_index < bds[i].bd_bucket->hsb_index);
 704                cfs_hash_bd_lock(hs, &bds[i], excl);
 705                prev = bds[i].bd_bucket;
 706        }
 707}
 708
 709static void
 710cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
 711                         unsigned n, int excl)
 712{
 713        struct cfs_hash_bucket *prev = NULL;
 714        int i;
 715
 716        cfs_hash_for_each_bd(bds, n, i) {
 717                if (prev != bds[i].bd_bucket) {
 718                        cfs_hash_bd_unlock(hs, &bds[i], excl);
 719                        prev = bds[i].bd_bucket;
 720                }
 721        }
 722}
 723
 724static struct hlist_node *
 725cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
 726                                unsigned n, const void *key)
 727{
 728        struct hlist_node *ehnode;
 729        unsigned i;
 730
 731        cfs_hash_for_each_bd(bds, n, i) {
 732                ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL,
 733                                                   CFS_HS_LOOKUP_IT_FIND);
 734                if (ehnode)
 735                        return ehnode;
 736        }
 737        return NULL;
 738}
 739
 740static struct hlist_node *
 741cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
 742                                 unsigned n, const void *key,
 743                                 struct hlist_node *hnode, int noref)
 744{
 745        struct hlist_node *ehnode;
 746        int intent;
 747        unsigned i;
 748
 749        LASSERT(hnode);
 750        intent = (!noref * CFS_HS_LOOKUP_MASK_REF) | CFS_HS_LOOKUP_IT_PEEK;
 751
 752        cfs_hash_for_each_bd(bds, n, i) {
 753                ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key,
 754                                                   NULL, intent);
 755                if (ehnode)
 756                        return ehnode;
 757        }
 758
 759        if (i == 1) { /* only one bucket */
 760                cfs_hash_bd_add_locked(hs, &bds[0], hnode);
 761        } else {
 762                struct cfs_hash_bd mybd;
 763
 764                cfs_hash_bd_get(hs, key, &mybd);
 765                cfs_hash_bd_add_locked(hs, &mybd, hnode);
 766        }
 767
 768        return hnode;
 769}
 770
 771static struct hlist_node *
 772cfs_hash_multi_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
 773                                 unsigned n, const void *key,
 774                                 struct hlist_node *hnode)
 775{
 776        struct hlist_node *ehnode;
 777        unsigned int i;
 778
 779        cfs_hash_for_each_bd(bds, n, i) {
 780                ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode,
 781                                                   CFS_HS_LOOKUP_IT_FINDDEL);
 782                if (ehnode)
 783                        return ehnode;
 784        }
 785        return NULL;
 786}
 787
 788static void
 789cfs_hash_bd_order(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2)
 790{
 791        int rc;
 792
 793        if (!bd2->bd_bucket)
 794                return;
 795
 796        if (!bd1->bd_bucket) {
 797                *bd1 = *bd2;
 798                bd2->bd_bucket = NULL;
 799                return;
 800        }
 801
 802        rc = cfs_hash_bd_compare(bd1, bd2);
 803        if (!rc)
 804                bd2->bd_bucket = NULL;
 805        else if (rc > 0)
 806                swap(*bd1, *bd2); /* swap bd1 and bd2 */
 807}
 808
 809void
 810cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key,
 811                     struct cfs_hash_bd *bds)
 812{
 813        /* NB: caller should hold hs_lock.rw if REHASH is set */
 814        cfs_hash_bd_from_key(hs, hs->hs_buckets,
 815                             hs->hs_cur_bits, key, &bds[0]);
 816        if (likely(!hs->hs_rehash_buckets)) {
 817                /* no rehash or not rehashing */
 818                bds[1].bd_bucket = NULL;
 819                return;
 820        }
 821
 822        LASSERT(hs->hs_rehash_bits != 0);
 823        cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
 824                             hs->hs_rehash_bits, key, &bds[1]);
 825
 826        cfs_hash_bd_order(&bds[0], &bds[1]);
 827}
 828
 829void
 830cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
 831{
 832        cfs_hash_multi_bd_lock(hs, bds, 2, excl);
 833}
 834
 835void
 836cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
 837{
 838        cfs_hash_multi_bd_unlock(hs, bds, 2, excl);
 839}
 840
 841struct hlist_node *
 842cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
 843                               const void *key)
 844{
 845        return cfs_hash_multi_bd_lookup_locked(hs, bds, 2, key);
 846}
 847
 848struct hlist_node *
 849cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
 850                                const void *key, struct hlist_node *hnode,
 851                                int noref)
 852{
 853        return cfs_hash_multi_bd_findadd_locked(hs, bds, 2, key,
 854                                                hnode, noref);
 855}
 856
 857struct hlist_node *
 858cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
 859                                const void *key, struct hlist_node *hnode)
 860{
 861        return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode);
 862}
 863
 864static void
 865cfs_hash_buckets_free(struct cfs_hash_bucket **buckets,
 866                      int bkt_size, int prev_size, int size)
 867{
 868        int i;
 869
 870        for (i = prev_size; i < size; i++) {
 871                if (buckets[i])
 872                        LIBCFS_FREE(buckets[i], bkt_size);
 873        }
 874
 875        LIBCFS_FREE(buckets, sizeof(buckets[0]) * size);
 876}
 877
 878/*
 879 * Create or grow bucket memory. Return old_buckets if no allocation was
 880 * needed, the newly allocated buckets if allocation was needed and
 881 * successful, and NULL on error.
 882 */
 883static struct cfs_hash_bucket **
 884cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
 885                         unsigned int old_size, unsigned int new_size)
 886{
 887        struct cfs_hash_bucket **new_bkts;
 888        int i;
 889
 890        LASSERT(old_size == 0 || old_bkts);
 891
 892        if (old_bkts && old_size == new_size)
 893                return old_bkts;
 894
 895        LIBCFS_ALLOC(new_bkts, sizeof(new_bkts[0]) * new_size);
 896        if (!new_bkts)
 897                return NULL;
 898
 899        if (old_bkts) {
 900                memcpy(new_bkts, old_bkts,
 901                       min(old_size, new_size) * sizeof(*old_bkts));
 902        }
 903
 904        for (i = old_size; i < new_size; i++) {
 905                struct hlist_head *hhead;
 906                struct cfs_hash_bd bd;
 907
 908                LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs));
 909                if (!new_bkts[i]) {
 910                        cfs_hash_buckets_free(new_bkts, cfs_hash_bkt_size(hs),
 911                                              old_size, new_size);
 912                        return NULL;
 913                }
 914
 915                new_bkts[i]->hsb_index   = i;
 916                new_bkts[i]->hsb_version = 1;  /* shouldn't be zero */
 917                new_bkts[i]->hsb_depmax  = -1; /* unknown */
 918                bd.bd_bucket = new_bkts[i];
 919                cfs_hash_bd_for_each_hlist(hs, &bd, hhead)
 920                        INIT_HLIST_HEAD(hhead);
 921
 922                if (cfs_hash_with_no_lock(hs) ||
 923                    cfs_hash_with_no_bktlock(hs))
 924                        continue;
 925
 926                if (cfs_hash_with_rw_bktlock(hs))
 927                        rwlock_init(&new_bkts[i]->hsb_lock.rw);
 928                else if (cfs_hash_with_spin_bktlock(hs))
 929                        spin_lock_init(&new_bkts[i]->hsb_lock.spin);
 930                else
 931                        LBUG(); /* invalid use-case */
 932        }
 933        return new_bkts;
 934}
 935
 936/**
 937 * Initialize new libcfs hash, where:
 938 * @name     - Descriptive hash name
 939 * @cur_bits - Initial hash table size, in bits
 940 * @max_bits - Maximum allowed hash table resize, in bits
 941 * @ops      - Registered hash table operations
 942 * @flags    - CFS_HASH_REHASH enables dynamic hash resizing
 943 *           - CFS_HASH_SORT enables chained hash sort
 944 */
 945static int cfs_hash_rehash_worker(cfs_workitem_t *wi);
 946
 947#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
 948static int cfs_hash_dep_print(cfs_workitem_t *wi)
 949{
 950        struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_dep_wi);
 951        int dep;
 952        int bkt;
 953        int off;
 954        int bits;
 955
 956        spin_lock(&hs->hs_dep_lock);
 957        dep  = hs->hs_dep_max;
 958        bkt  = hs->hs_dep_bkt;
 959        off  = hs->hs_dep_off;
 960        bits = hs->hs_dep_bits;
 961        spin_unlock(&hs->hs_dep_lock);
 962
 963        LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
 964                      hs->hs_name, bits, dep, bkt, off);
 965        spin_lock(&hs->hs_dep_lock);
 966        hs->hs_dep_bits = 0; /* mark as workitem done */
 967        spin_unlock(&hs->hs_dep_lock);
 968        return 0;
 969}
 970
 971static void cfs_hash_depth_wi_init(struct cfs_hash *hs)
 972{
 973        spin_lock_init(&hs->hs_dep_lock);
 974        cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print);
 975}
 976
 977static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs)
 978{
 979        if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi))
 980                return;
 981
 982        spin_lock(&hs->hs_dep_lock);
 983        while (hs->hs_dep_bits != 0) {
 984                spin_unlock(&hs->hs_dep_lock);
 985                cond_resched();
 986                spin_lock(&hs->hs_dep_lock);
 987        }
 988        spin_unlock(&hs->hs_dep_lock);
 989}
 990
 991#else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */
 992
 993static inline void cfs_hash_depth_wi_init(struct cfs_hash *hs) {}
 994static inline void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) {}
 995
 996#endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */
 997
 998struct cfs_hash *
 999cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
1000                unsigned bkt_bits, unsigned extra_bytes,
1001                unsigned min_theta, unsigned max_theta,
1002                struct cfs_hash_ops *ops, unsigned flags)
1003{
1004        struct cfs_hash *hs;
1005        int len;
1006
1007        CLASSERT(CFS_HASH_THETA_BITS < 15);
1008
1009        LASSERT(name);
1010        LASSERT(ops->hs_key);
1011        LASSERT(ops->hs_hash);
1012        LASSERT(ops->hs_object);
1013        LASSERT(ops->hs_keycmp);
1014        LASSERT(ops->hs_get);
1015        LASSERT(ops->hs_put_locked);
1016
1017        if ((flags & CFS_HASH_REHASH) != 0)
1018                flags |= CFS_HASH_COUNTER; /* must have counter */
1019
1020        LASSERT(cur_bits > 0);
1021        LASSERT(cur_bits >= bkt_bits);
1022        LASSERT(max_bits >= cur_bits && max_bits < 31);
1023        LASSERT(ergo((flags & CFS_HASH_REHASH) == 0, cur_bits == max_bits));
1024        LASSERT(ergo((flags & CFS_HASH_REHASH) != 0,
1025                     (flags & CFS_HASH_NO_LOCK) == 0));
1026        LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0, ops->hs_keycpy));
1027
1028        len = (flags & CFS_HASH_BIGNAME) == 0 ?
1029              CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
1030        LIBCFS_ALLOC(hs, offsetof(struct cfs_hash, hs_name[len]));
1031        if (!hs)
1032                return NULL;
1033
1034        strlcpy(hs->hs_name, name, len);
1035        hs->hs_flags = flags;
1036
1037        atomic_set(&hs->hs_refcount, 1);
1038        atomic_set(&hs->hs_count, 0);
1039
1040        cfs_hash_lock_setup(hs);
1041        cfs_hash_hlist_setup(hs);
1042
1043        hs->hs_cur_bits = (__u8)cur_bits;
1044        hs->hs_min_bits = (__u8)cur_bits;
1045        hs->hs_max_bits = (__u8)max_bits;
1046        hs->hs_bkt_bits = (__u8)bkt_bits;
1047
1048        hs->hs_ops         = ops;
1049        hs->hs_extra_bytes = extra_bytes;
1050        hs->hs_rehash_bits = 0;
1051        cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker);
1052        cfs_hash_depth_wi_init(hs);
1053
1054        if (cfs_hash_with_rehash(hs))
1055                __cfs_hash_set_theta(hs, min_theta, max_theta);
1056
1057        hs->hs_buckets = cfs_hash_buckets_realloc(hs, NULL, 0,
1058                                                  CFS_HASH_NBKT(hs));
1059        if (hs->hs_buckets)
1060                return hs;
1061
1062        LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[len]));
1063        return NULL;
1064}
1065EXPORT_SYMBOL(cfs_hash_create);
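
/*
 * Minimal creation sketch.  All "example_*" names are hypothetical, and the
 * cfs_hash_ops callback signatures plus the CFS_HASH_MIN_THETA and
 * CFS_HASH_MAX_THETA defaults are assumed from the libcfs hash header
 * rather than shown in this file, so treat them as an approximation.
 */
struct example_obj {
        __u64                   eo_key;
        atomic_t                eo_ref;
        struct hlist_node       eo_hnode;
};

static void *example_key(struct hlist_node *hnode)
{
        return &container_of(hnode, struct example_obj, eo_hnode)->eo_key;
}

static unsigned example_hash(struct cfs_hash *hs, const void *key,
                             unsigned mask)
{
        /* trivial hash, good enough for a sketch */
        return (unsigned)(*(const __u64 *)key) & mask;
}

static int example_keycmp(const void *key, struct hlist_node *hnode)
{
        /* return non-zero when @key matches the key stored in @hnode */
        return *(const __u64 *)key ==
               container_of(hnode, struct example_obj, eo_hnode)->eo_key;
}

static void *example_object(struct hlist_node *hnode)
{
        return container_of(hnode, struct example_obj, eo_hnode);
}

static void example_get(struct cfs_hash *hs, struct hlist_node *hnode)
{
        struct example_obj *obj = example_object(hnode);

        atomic_inc(&obj->eo_ref);
}

static void example_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
{
        struct example_obj *obj = example_object(hnode);

        /* a real user would free @obj when the last reference drops */
        atomic_dec(&obj->eo_ref);
}

static struct cfs_hash_ops example_hash_ops = {
        .hs_key         = example_key,
        .hs_hash        = example_hash,
        .hs_keycmp      = example_keycmp,
        .hs_object      = example_object,
        .hs_get         = example_get,
        .hs_put_locked  = example_put_locked,
        .hs_put         = example_put_locked, /* same body serves both here */
};

static struct cfs_hash *example_hash_create(void)
{
        /* 2^5 hlist heads initially, rehash up to 2^10, 2^3 heads per bucket */
        return cfs_hash_create("example", 5, 10, 3, 0,
                               CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
                               &example_hash_ops,
                               CFS_HASH_REHASH | CFS_HASH_COUNTER);
}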
1066
1067/**
1068 * Cleanup libcfs hash @hs.
1069 */
1070static void
1071cfs_hash_destroy(struct cfs_hash *hs)
1072{
1073        struct hlist_node *hnode;
1074        struct hlist_node *pos;
1075        struct cfs_hash_bd bd;
1076        int i;
1077
1078        LASSERT(hs);
1079        LASSERT(!cfs_hash_is_exiting(hs) &&
1080                !cfs_hash_is_iterating(hs));
1081
1082        /**
 1083         * prohibit further rehashes; no lock is needed because we are
 1084         * the only (last) user who can change it.
1085         */
1086        hs->hs_exiting = 1;
1087        if (cfs_hash_with_rehash(hs))
1088                cfs_hash_rehash_cancel(hs);
1089
1090        cfs_hash_depth_wi_cancel(hs);
1091        /* rehash should be done/canceled */
1092        LASSERT(hs->hs_buckets && !hs->hs_rehash_buckets);
1093
1094        cfs_hash_for_each_bucket(hs, &bd, i) {
1095                struct hlist_head *hhead;
1096
1097                LASSERT(bd.bd_bucket);
 1098                /* no need to take this lock; done only for code consistency */
1099                cfs_hash_bd_lock(hs, &bd, 1);
1100
1101                cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1102                        hlist_for_each_safe(hnode, pos, hhead) {
1103                                LASSERTF(!cfs_hash_with_assert_empty(hs),
1104                                         "hash %s bucket %u(%u) is not empty: %u items left\n",
1105                                         hs->hs_name, bd.bd_bucket->hsb_index,
1106                                         bd.bd_offset, bd.bd_bucket->hsb_count);
 1107                                /* can't validate the key here, because
 1108                                 * rehash may have been interrupted
 1109                                 */
1110                                cfs_hash_bd_del_locked(hs, &bd, hnode);
1111                                cfs_hash_exit(hs, hnode);
1112                        }
1113                }
1114                LASSERT(bd.bd_bucket->hsb_count == 0);
1115                cfs_hash_bd_unlock(hs, &bd, 1);
1116                cond_resched();
1117        }
1118
1119        LASSERT(atomic_read(&hs->hs_count) == 0);
1120
1121        cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs),
1122                              0, CFS_HASH_NBKT(hs));
1123        i = cfs_hash_with_bigname(hs) ?
1124            CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
1125        LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[i]));
1126}
1127
1128struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs)
1129{
1130        if (atomic_inc_not_zero(&hs->hs_refcount))
1131                return hs;
1132        return NULL;
1133}
1134EXPORT_SYMBOL(cfs_hash_getref);
1135
1136void cfs_hash_putref(struct cfs_hash *hs)
1137{
1138        if (atomic_dec_and_test(&hs->hs_refcount))
1139                cfs_hash_destroy(hs);
1140}
1141EXPORT_SYMBOL(cfs_hash_putref);
1142
1143static inline int
1144cfs_hash_rehash_bits(struct cfs_hash *hs)
1145{
1146        if (cfs_hash_with_no_lock(hs) ||
1147            !cfs_hash_with_rehash(hs))
1148                return -EOPNOTSUPP;
1149
1150        if (unlikely(cfs_hash_is_exiting(hs)))
1151                return -ESRCH;
1152
1153        if (unlikely(cfs_hash_is_rehashing(hs)))
1154                return -EALREADY;
1155
1156        if (unlikely(cfs_hash_is_iterating(hs)))
1157                return -EAGAIN;
1158
1159        /* XXX: need to handle case with max_theta != 2.0
1160         *      and the case with min_theta != 0.5
1161         */
1162        if ((hs->hs_cur_bits < hs->hs_max_bits) &&
1163            (__cfs_hash_theta(hs) > hs->hs_max_theta))
1164                return hs->hs_cur_bits + 1;
1165
1166        if (!cfs_hash_with_shrink(hs))
1167                return 0;
1168
1169        if ((hs->hs_cur_bits > hs->hs_min_bits) &&
1170            (__cfs_hash_theta(hs) < hs->hs_min_theta))
1171                return hs->hs_cur_bits - 1;
1172
1173        return 0;
1174}
1175
1176/**
1177 * don't allow inline rehash if:
 1178 * - the user wants non-blocking changes (add/del) on the hash table
 1179 * - there are too many elements
1180 */
1181static inline int
1182cfs_hash_rehash_inline(struct cfs_hash *hs)
1183{
1184        return !cfs_hash_with_nblk_change(hs) &&
1185               atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG;
1186}
1187
1188/**
1189 * Add item @hnode to libcfs hash @hs using @key.  The registered
1190 * ops->hs_get function will be called when the item is added.
1191 */
1192void
1193cfs_hash_add(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
1194{
1195        struct cfs_hash_bd bd;
1196        int bits;
1197
1198        LASSERT(hlist_unhashed(hnode));
1199
1200        cfs_hash_lock(hs, 0);
1201        cfs_hash_bd_get_and_lock(hs, key, &bd, 1);
1202
1203        cfs_hash_key_validate(hs, key, hnode);
1204        cfs_hash_bd_add_locked(hs, &bd, hnode);
1205
1206        cfs_hash_bd_unlock(hs, &bd, 1);
1207
1208        bits = cfs_hash_rehash_bits(hs);
1209        cfs_hash_unlock(hs, 0);
1210        if (bits > 0)
1211                cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
1212}
1213EXPORT_SYMBOL(cfs_hash_add);
1214
1215static struct hlist_node *
1216cfs_hash_find_or_add(struct cfs_hash *hs, const void *key,
1217                     struct hlist_node *hnode, int noref)
1218{
1219        struct hlist_node *ehnode;
1220        struct cfs_hash_bd bds[2];
1221        int bits = 0;
1222
1223        LASSERT(hlist_unhashed(hnode));
1224
1225        cfs_hash_lock(hs, 0);
1226        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
1227
1228        cfs_hash_key_validate(hs, key, hnode);
1229        ehnode = cfs_hash_dual_bd_findadd_locked(hs, bds, key,
1230                                                 hnode, noref);
1231        cfs_hash_dual_bd_unlock(hs, bds, 1);
1232
1233        if (ehnode == hnode)    /* new item added */
1234                bits = cfs_hash_rehash_bits(hs);
1235        cfs_hash_unlock(hs, 0);
1236        if (bits > 0)
1237                cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
1238
1239        return ehnode;
1240}
1241
1242/**
1243 * Add item @hnode to libcfs hash @hs using @key.  The registered
1244 * ops->hs_get function will be called if the item was added.
1245 * Returns 0 on success or -EALREADY on key collisions.
1246 */
1247int
1248cfs_hash_add_unique(struct cfs_hash *hs, const void *key,
1249                    struct hlist_node *hnode)
1250{
1251        return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ?
1252               -EALREADY : 0;
1253}
1254EXPORT_SYMBOL(cfs_hash_add_unique);
1255
1256/**
1257 * Add item @hnode to libcfs hash @hs using @key.  If this @key
1258 * already exists in the hash then ops->hs_get will be called on the
1259 * conflicting entry and that entry will be returned to the caller.
1260 * Otherwise ops->hs_get is called on the item which was added.
1261 */
1262void *
1263cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key,
1264                        struct hlist_node *hnode)
1265{
1266        hnode = cfs_hash_find_or_add(hs, key, hnode, 0);
1267
1268        return cfs_hash_object(hs, hnode);
1269}
1270EXPORT_SYMBOL(cfs_hash_findadd_unique);
1271
1272/**
1273 * Delete item @hnode from the libcfs hash @hs using @key.  The @key
1274 * is required to ensure the correct hash bucket is locked since there
1275 * is no direct linkage from the item to the bucket.  The object
 1276 * removed from the hash will be returned and ops->hs_put is called
1277 * on the removed object.
1278 */
1279void *
1280cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
1281{
1282        void *obj = NULL;
1283        int bits = 0;
1284        struct cfs_hash_bd bds[2];
1285
1286        cfs_hash_lock(hs, 0);
1287        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
1288
1289        /* NB: do nothing if @hnode is not in hash table */
1290        if (!hnode || !hlist_unhashed(hnode)) {
1291                if (!bds[1].bd_bucket && hnode) {
1292                        cfs_hash_bd_del_locked(hs, &bds[0], hnode);
1293                } else {
1294                        hnode = cfs_hash_dual_bd_finddel_locked(hs, bds,
1295                                                                key, hnode);
1296                }
1297        }
1298
1299        if (hnode) {
1300                obj  = cfs_hash_object(hs, hnode);
1301                bits = cfs_hash_rehash_bits(hs);
1302        }
1303
1304        cfs_hash_dual_bd_unlock(hs, bds, 1);
1305        cfs_hash_unlock(hs, 0);
1306        if (bits > 0)
1307                cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
1308
1309        return obj;
1310}
1311EXPORT_SYMBOL(cfs_hash_del);
1312
1313/**
 1314 * Delete the item with the given @key in libcfs hash @hs.  The first @key
 1315 * found in the hash will be removed; if the key exists multiple times in
 1316 * the hash @hs, this function must be called once per key.  The removed object
1317 * will be returned and ops->hs_put is called on the removed object.
1318 */
1319void *
1320cfs_hash_del_key(struct cfs_hash *hs, const void *key)
1321{
1322        return cfs_hash_del(hs, key, NULL);
1323}
1324EXPORT_SYMBOL(cfs_hash_del_key);
1325
1326/**
1327 * Lookup an item using @key in the libcfs hash @hs and return it.
 1328 * If the @key is found in the hash, hs->hs_get() is called and the
 1329 * matching object is returned.  It is the caller's responsibility
 1330 * to call the counterpart ops->hs_put using the cfs_hash_put() macro
 1331 * when finished with the object.  If the @key was not found
 1332 * in the hash @hs, NULL is returned.
1333 */
1334void *
1335cfs_hash_lookup(struct cfs_hash *hs, const void *key)
1336{
1337        void *obj = NULL;
1338        struct hlist_node *hnode;
1339        struct cfs_hash_bd bds[2];
1340
1341        cfs_hash_lock(hs, 0);
1342        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
1343
1344        hnode = cfs_hash_dual_bd_lookup_locked(hs, bds, key);
1345        if (hnode)
1346                obj = cfs_hash_object(hs, hnode);
1347
1348        cfs_hash_dual_bd_unlock(hs, bds, 0);
1349        cfs_hash_unlock(hs, 0);
1350
1351        return obj;
1352}
1353EXPORT_SYMBOL(cfs_hash_lookup);
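
/*
 * Illustrative round trip over the add/lookup/del API, reusing the
 * hypothetical example_obj type and ops from the sketch that follows
 * cfs_hash_create() above.
 */
static void example_hash_use(struct cfs_hash *hs, struct example_obj *obj)
{
        struct example_obj *found;

        /* insert; the hash calls ops->hs_get on the new item */
        if (cfs_hash_add_unique(hs, &obj->eo_key, &obj->eo_hnode) == -EALREADY)
                return;         /* key already present */

        /* lookup takes its own reference via ops->hs_get ... */
        found = cfs_hash_lookup(hs, &obj->eo_key);
        if (found)
                cfs_hash_put(hs, &found->eo_hnode); /* ... drop it when done */

        /* remove by key; ops->hs_put is called on the removed object */
        cfs_hash_del_key(hs, &obj->eo_key);
}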
1354
1355static void
1356cfs_hash_for_each_enter(struct cfs_hash *hs)
1357{
1358        LASSERT(!cfs_hash_is_exiting(hs));
1359
1360        if (!cfs_hash_with_rehash(hs))
1361                return;
1362        /*
 1363         * NB: there is a race on cfs_hash::hs_iterating, but it doesn't
 1364         * matter because it's just an unreliable signal to the rehash
 1365         * thread, which will try to finish rehashing ASAP on seeing this.
1366         */
1367        hs->hs_iterating = 1;
1368
1369        cfs_hash_lock(hs, 1);
1370        hs->hs_iterators++;
1371
 1372        /* NB: iteration is mostly called by a service thread, so we
 1373         * prefer to cancel a pending rehash request instead of blocking
 1374         * the service thread; we will relaunch the rehash request after
 1375         * the iteration
1376         */
1377        if (cfs_hash_is_rehashing(hs))
1378                cfs_hash_rehash_cancel_locked(hs);
1379        cfs_hash_unlock(hs, 1);
1380}
1381
1382static void
1383cfs_hash_for_each_exit(struct cfs_hash *hs)
1384{
1385        int remained;
1386        int bits;
1387
1388        if (!cfs_hash_with_rehash(hs))
1389                return;
1390        cfs_hash_lock(hs, 1);
1391        remained = --hs->hs_iterators;
1392        bits = cfs_hash_rehash_bits(hs);
1393        cfs_hash_unlock(hs, 1);
 1394        /* NB: there is a race on cfs_hash::hs_iterating, see above */
1395        if (remained == 0)
1396                hs->hs_iterating = 0;
1397        if (bits > 0) {
1398                cfs_hash_rehash(hs, atomic_read(&hs->hs_count) <
1399                                    CFS_HASH_LOOP_HOG);
1400        }
1401}
1402
1403/**
1404 * For each item in the libcfs hash @hs call the passed callback @func
1405 * and pass to it as an argument each hash item and the private @data.
1406 *
1407 * a) the function may sleep!
1408 * b) during the callback:
1409 *    . the bucket lock is held so the callback must never sleep.
 1410 *    . if @removal_safe is true, the user can remove the current item
 1411 *      with cfs_hash_bd_del_locked()
1412 */
1413static __u64
1414cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
1415                        void *data, int remove_safe)
1416{
1417        struct hlist_node *hnode;
1418        struct hlist_node *pos;
1419        struct cfs_hash_bd bd;
1420        __u64 count = 0;
1421        int excl = !!remove_safe;
1422        int loop = 0;
1423        int i;
1424
1425        cfs_hash_for_each_enter(hs);
1426
1427        cfs_hash_lock(hs, 0);
1428        LASSERT(!cfs_hash_is_rehashing(hs));
1429
1430        cfs_hash_for_each_bucket(hs, &bd, i) {
1431                struct hlist_head *hhead;
1432
1433                cfs_hash_bd_lock(hs, &bd, excl);
1434                if (!func) { /* only glimpse size */
1435                        count += bd.bd_bucket->hsb_count;
1436                        cfs_hash_bd_unlock(hs, &bd, excl);
1437                        continue;
1438                }
1439
1440                cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1441                        hlist_for_each_safe(hnode, pos, hhead) {
1442                                cfs_hash_bucket_validate(hs, &bd, hnode);
1443                                count++;
1444                                loop++;
1445                                if (func(hs, &bd, hnode, data)) {
1446                                        cfs_hash_bd_unlock(hs, &bd, excl);
1447                                        goto out;
1448                                }
1449                        }
1450                }
1451                cfs_hash_bd_unlock(hs, &bd, excl);
1452                if (loop < CFS_HASH_LOOP_HOG)
1453                        continue;
1454                loop = 0;
1455                cfs_hash_unlock(hs, 0);
1456                cond_resched();
1457                cfs_hash_lock(hs, 0);
1458        }
1459 out:
1460        cfs_hash_unlock(hs, 0);
1461
1462        cfs_hash_for_each_exit(hs);
1463        return count;
1464}
1465
1466struct cfs_hash_cond_arg {
1467        cfs_hash_cond_opt_cb_t  func;
1468        void                    *arg;
1469};
1470
1471static int
1472cfs_hash_cond_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1473                         struct hlist_node *hnode, void *data)
1474{
1475        struct cfs_hash_cond_arg *cond = data;
1476
1477        if (cond->func(cfs_hash_object(hs, hnode), cond->arg))
1478                cfs_hash_bd_del_locked(hs, bd, hnode);
1479        return 0;
1480}
1481
1482/**
 1483 * Delete items from the libcfs hash @hs for which @func returns true.
 1484 * The write lock is held while looping over each bucket so that no
 1485 * object can be referenced during the operation.
1486 */
1487void
1488cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t func, void *data)
1489{
1490        struct cfs_hash_cond_arg arg = {
1491                .func   = func,
1492                .arg    = data,
1493        };
1494
1495        cfs_hash_for_each_tight(hs, cfs_hash_cond_del_locked, &arg, 1);
1496}
1497EXPORT_SYMBOL(cfs_hash_cond_del);
1498
1499void
1500cfs_hash_for_each(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
1501                  void *data)
1502{
1503        cfs_hash_for_each_tight(hs, func, data, 0);
1504}
1505EXPORT_SYMBOL(cfs_hash_for_each);
1506
1507void
1508cfs_hash_for_each_safe(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
1509                       void *data)
1510{
1511        cfs_hash_for_each_tight(hs, func, data, 1);
1512}
1513EXPORT_SYMBOL(cfs_hash_for_each_safe);
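
/*
 * Sketch of an iteration callback for cfs_hash_for_each() /
 * cfs_hash_for_each_safe() above ("example_count_cb" is hypothetical).
 * The callback runs with the bucket lock held, so it must not sleep;
 * returning non-zero stops the iteration.
 */
static int example_count_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
                            struct hlist_node *hnode, void *data)
{
        (*(__u64 *)data)++;
        return 0;       /* keep iterating */
}

/* typical call: __u64 n = 0; cfs_hash_for_each(hs, example_count_cb, &n); */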
1514
1515static int
1516cfs_hash_peek(struct cfs_hash *hs, struct cfs_hash_bd *bd,
1517              struct hlist_node *hnode, void *data)
1518{
1519        *(int *)data = 0;
1520        return 1; /* return 1 to break the loop */
1521}
1522
1523int
1524cfs_hash_is_empty(struct cfs_hash *hs)
1525{
1526        int empty = 1;
1527
1528        cfs_hash_for_each_tight(hs, cfs_hash_peek, &empty, 0);
1529        return empty;
1530}
1531EXPORT_SYMBOL(cfs_hash_is_empty);
1532
1533__u64
1534cfs_hash_size_get(struct cfs_hash *hs)
1535{
1536        return cfs_hash_with_counter(hs) ?
1537               atomic_read(&hs->hs_count) :
1538               cfs_hash_for_each_tight(hs, NULL, NULL, 0);
1539}
1540EXPORT_SYMBOL(cfs_hash_size_get);
1541
1542/*
1543 * cfs_hash_for_each_relax:
 1544 * Iterate the hash table and call @func on each item without
 1545 * holding any lock.  This function cannot guarantee that the iteration
 1546 * will finish if these features are enabled:
 1547 *
 1548 *  a. if rehash_key is enabled, an item can be moved from
 1549 *     one bucket to another bucket
 1550 *  b. the user can remove a non-zero-ref item from the hash table,
 1551 *     so the item can disappear from the hash table; even worse,
 1552 *     it's possible that the user changes the key and inserts it into
 1553 *     another hash bucket.
 1554 * There is no way for us to finish the iteration correctly in these
 1555 * two cases, so the iteration has to be stopped on any such change.
1556 */
1557static int
1558cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
1559                        void *data)
1560{
1561        struct hlist_node *hnode;
1562        struct hlist_node *tmp;
1563        struct cfs_hash_bd bd;
1564        __u32 version;
1565        int count = 0;
1566        int stop_on_change;
1567        int rc;
1568        int i;
1569
1570        stop_on_change = cfs_hash_with_rehash_key(hs) ||
1571                         !cfs_hash_with_no_itemref(hs) ||
1572                         !hs->hs_ops->hs_put_locked;
1573        cfs_hash_lock(hs, 0);
1574        LASSERT(!cfs_hash_is_rehashing(hs));
1575
1576        cfs_hash_for_each_bucket(hs, &bd, i) {
1577                struct hlist_head *hhead;
1578
1579                cfs_hash_bd_lock(hs, &bd, 0);
1580                version = cfs_hash_bd_version_get(&bd);
1581
1582                cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1583                        for (hnode = hhead->first; hnode;) {
1584                                cfs_hash_bucket_validate(hs, &bd, hnode);
1585                                cfs_hash_get(hs, hnode);
1586                                cfs_hash_bd_unlock(hs, &bd, 0);
1587                                cfs_hash_unlock(hs, 0);
1588
1589                                rc = func(hs, &bd, hnode, data);
1590                                if (stop_on_change)
1591                                        cfs_hash_put(hs, hnode);
1592                                cond_resched();
1593                                count++;
1594
1595                                cfs_hash_lock(hs, 0);
1596                                cfs_hash_bd_lock(hs, &bd, 0);
1597                                if (!stop_on_change) {
1598                                        tmp = hnode->next;
1599                                        cfs_hash_put_locked(hs, hnode);
1600                                        hnode = tmp;
1601                                } else { /* bucket changed? */
1602                                        if (version !=
1603                                            cfs_hash_bd_version_get(&bd))
1604                                                break;
1605                                        /* safe to continue because no change */
1606                                        hnode = hnode->next;
1607                                }
1608                                if (rc) /* callback wants to break iteration */
1609                                        break;
1610                        }
1611                        if (rc) /* callback wants to break iteration */
1612                                break;
1613                }
1614                cfs_hash_bd_unlock(hs, &bd, 0);
1615                if (rc) /* callback wants to break iteration */
1616                        break;
1617        }
1618        cfs_hash_unlock(hs, 0);
1619
1620        return count;
1621}
1622
1623int
1624cfs_hash_for_each_nolock(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
1625                         void *data)
1626{
1627        if (cfs_hash_with_no_lock(hs) ||
1628            cfs_hash_with_rehash_key(hs) ||
1629            !cfs_hash_with_no_itemref(hs))
1630                return -EOPNOTSUPP;
1631
1632        if (!hs->hs_ops->hs_get ||
1633            (!hs->hs_ops->hs_put && !hs->hs_ops->hs_put_locked))
1634                return -EOPNOTSUPP;
1635
1636        cfs_hash_for_each_enter(hs);
1637        cfs_hash_for_each_relax(hs, func, data);
1638        cfs_hash_for_each_exit(hs);
1639
1640        return 0;
1641}
1642EXPORT_SYMBOL(cfs_hash_for_each_nolock);
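
/*
 * Caller sketch for cfs_hash_for_each_nolock(); "my_obj_visit_cb" and
 * "my_hash" are hypothetical.  No lock is held across the callback, so
 * the callback may sleep; the checks above must pass (hs_get plus hs_put
 * or hs_put_locked registered, no lockless mode, no rehash-by-key),
 * otherwise -EOPNOTSUPP is returned.
 *
 *        rc = cfs_hash_for_each_nolock(my_hash, my_obj_visit_cb, NULL);
 *        if (rc)
 *                CERROR("cannot walk hash table: %d\n", rc);
 */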
1643
1644/**
1645 * For each hash bucket in the libcfs hash @hs call the passed callback
1646 * @func until all the hash buckets are empty.  The passed callback @func
1647 * or the previously registered callback hs->hs_put must remove the item
1648 * from the hash.  You may either use the cfs_hash_del() or hlist_del()
1649 * functions.  No rwlocks will be held during the callback @func, so it
1650 * is safe to sleep if needed.  This function will not terminate until
1651 * the hash is empty.  Note it is still possible to concurrently add new
1652 * items into the hash.  It is the caller's responsibility to ensure
1653 * the required locking is in place to prevent concurrent insertions.
1654 */
1655int
1656cfs_hash_for_each_empty(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
1657                        void *data)
1658{
1659        unsigned int i = 0;
1660
1661        if (cfs_hash_with_no_lock(hs))
1662                return -EOPNOTSUPP;
1663
1664        if (!hs->hs_ops->hs_get ||
1665            (!hs->hs_ops->hs_put && !hs->hs_ops->hs_put_locked))
1666                return -EOPNOTSUPP;
1667
1668        cfs_hash_for_each_enter(hs);
1669        while (cfs_hash_for_each_relax(hs, func, data)) {
1670                CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n",
1671                       hs->hs_name, i++);
1672        }
1673        cfs_hash_for_each_exit(hs);
1674        return 0;
1675}
1676EXPORT_SYMBOL(cfs_hash_for_each_empty);
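
/*
 * Teardown sketch for cfs_hash_for_each_empty(); "my_obj_release_cb" and
 * "my_hash" are hypothetical.  The callback must unhash each item (e.g.
 * via cfs_hash_del()) and free it.  The loop above re-runs
 * cfs_hash_for_each_relax() until a pass visits nothing, i.e. until the
 * table is empty:
 *
 *        cfs_hash_for_each_empty(my_hash, my_obj_release_cb, NULL);
 *        cfs_hash_putref(my_hash);
 */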
1677
1678void
1679cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned int hindex,
1680                        cfs_hash_for_each_cb_t func, void *data)
1681{
1682        struct hlist_head *hhead;
1683        struct hlist_node *hnode;
1684        struct cfs_hash_bd bd;
1685
1686        cfs_hash_for_each_enter(hs);
1687        cfs_hash_lock(hs, 0);
1688        if (hindex >= CFS_HASH_NHLIST(hs))
1689                goto out;
1690
1691        cfs_hash_bd_index_set(hs, hindex, &bd);
1692
1693        cfs_hash_bd_lock(hs, &bd, 0);
1694        hhead = cfs_hash_bd_hhead(hs, &bd);
1695        hlist_for_each(hnode, hhead) {
1696                if (func(hs, &bd, hnode, data))
1697                        break;
1698        }
1699        cfs_hash_bd_unlock(hs, &bd, 0);
1700out:
1701        cfs_hash_unlock(hs, 0);
1702        cfs_hash_for_each_exit(hs);
1703}
1704EXPORT_SYMBOL(cfs_hash_hlist_for_each);
1705
1706/*
1707 * For each item in the libcfs hash @hs which matches @key, call the
1708 * passed callback @func with the matching hash item and the private
1709 * @data as arguments.  The bucket lock is held during the callback,
1710 * so the callback must never sleep.
1711 */
1712void
1713cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
1714                      cfs_hash_for_each_cb_t func, void *data)
1715{
1716        struct hlist_node *hnode;
1717        struct cfs_hash_bd bds[2];
1718        unsigned int i;
1719
1720        cfs_hash_lock(hs, 0);
1721
1722        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
1723
1724        cfs_hash_for_each_bd(bds, 2, i) {
1725                struct hlist_head *hlist = cfs_hash_bd_hhead(hs, &bds[i]);
1726
1727                hlist_for_each(hnode, hlist) {
1728                        cfs_hash_bucket_validate(hs, &bds[i], hnode);
1729
1730                        if (cfs_hash_keycmp(hs, key, hnode)) {
1731                                if (func(hs, &bds[i], hnode, data))
1732                                        break;
1733                        }
1734                }
1735        }
1736
1737        cfs_hash_dual_bd_unlock(hs, bds, 0);
1738        cfs_hash_unlock(hs, 0);
1739}
1740EXPORT_SYMBOL(cfs_hash_for_each_key);
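
/*
 * Sketch: count every item hashed under one key; "my_hash", "my_key" and
 * the callback are hypothetical.  The bucket lock is held while the
 * callback runs, so it must never sleep.
 *
 *        static int my_obj_match_cb(struct cfs_hash *hs,
 *                                   struct cfs_hash_bd *bd,
 *                                   struct hlist_node *hnode, void *data)
 *        {
 *                (*(int *)data)++;
 *                return 0;
 *        }
 *
 *        int matches = 0;
 *
 *        cfs_hash_for_each_key(my_hash, &my_key, my_obj_match_cb, &matches);
 */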
1741
1742/**
1743 * Rehash the libcfs hash @hs to the given @bits.  This can be used
1744 * to grow the hash size when excessive chaining is detected, or to
1745 * shrink the hash when it is larger than needed.  When the CFS_HASH_REHASH
1746 * flag is set in @hs the libcfs hash may be dynamically rehashed
1747 * during addition or removal if the hash's theta value falls outside
1748 * the range hs->hs_min_theta..hs->hs_max_theta.  By default
1749 * these values are tuned to keep the chained hash depth small, and
1750 * this approach assumes a reasonably uniform hashing function.  The
1751 * theta thresholds for @hs are tunable via cfs_hash_set_theta().
1752 */
1753void
1754cfs_hash_rehash_cancel_locked(struct cfs_hash *hs)
1755{
1756        int i;
1757
1758        /* caller must hold cfs_hash_lock(hs, 1) */
1759        LASSERT(cfs_hash_with_rehash(hs) &&
1760                !cfs_hash_with_no_lock(hs));
1761
1762        if (!cfs_hash_is_rehashing(hs))
1763                return;
1764
1765        if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_rehash_wi)) {
1766                hs->hs_rehash_bits = 0;
1767                return;
1768        }
1769
1770        for (i = 2; cfs_hash_is_rehashing(hs); i++) {
1771                cfs_hash_unlock(hs, 1);
1772                /* raise a console warning if we have been waiting too long */
1773                CDEBUG(is_power_of_2(i >> 3) ? D_WARNING : D_INFO,
1774                       "hash %s is still rehashing, rescheduled %d\n",
1775                       hs->hs_name, i - 1);
1776                cond_resched();
1777                cfs_hash_lock(hs, 1);
1778        }
1779}
1780
1781void
1782cfs_hash_rehash_cancel(struct cfs_hash *hs)
1783{
1784        cfs_hash_lock(hs, 1);
1785        cfs_hash_rehash_cancel_locked(hs);
1786        cfs_hash_unlock(hs, 1);
1787}
1788
1789int
1790cfs_hash_rehash(struct cfs_hash *hs, int do_rehash)
1791{
1792        int rc;
1793
1794        LASSERT(cfs_hash_with_rehash(hs) && !cfs_hash_with_no_lock(hs));
1795
1796        cfs_hash_lock(hs, 1);
1797
1798        rc = cfs_hash_rehash_bits(hs);
1799        if (rc <= 0) {
1800                cfs_hash_unlock(hs, 1);
1801                return rc;
1802        }
1803
1804        hs->hs_rehash_bits = rc;
1805        if (!do_rehash) {
1806                /* launch and return */
1807                cfs_wi_schedule(cfs_sched_rehash, &hs->hs_rehash_wi);
1808                cfs_hash_unlock(hs, 1);
1809                return 0;
1810        }
1811
1812        /* rehash right now */
1813        cfs_hash_unlock(hs, 1);
1814
1815        return cfs_hash_rehash_worker(&hs->hs_rehash_wi);
1816}
1817
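/*
 * Usage sketch for cfs_hash_rehash() above: cfs_hash_rehash_bits() decides
 * whether a rehash is needed and to how many bits; if it returns <= 0 that
 * value is passed straight back.  With @do_rehash == 0 the rehash workitem
 * is only scheduled, while a non-zero @do_rehash runs the worker inline
 * and may block:
 *
 *        rc = cfs_hash_rehash(hs, 1);
 */
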
1818static int
1819cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old)
1820{
1821        struct cfs_hash_bd new;
1822        struct hlist_head *hhead;
1823        struct hlist_node *hnode;
1824        struct hlist_node *pos;
1825        void *key;
1826        int c = 0;
1827
1828        /* caller holds cfs_hash_lock(hs, 1), so no bucket lock is needed */
1829        cfs_hash_bd_for_each_hlist(hs, old, hhead) {
1830                hlist_for_each_safe(hnode, pos, hhead) {
1831                        key = cfs_hash_key(hs, hnode);
1832                        LASSERT(key);
1833                        /* Validate hnode is in the correct bucket. */
1834                        cfs_hash_bucket_validate(hs, old, hnode);
1835                        /*
1836                         * Delete from old hash bucket; move to new bucket.
1837                         * ops->hs_key must be defined.
1838                         */
1839                        cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
1840                                             hs->hs_rehash_bits, key, &new);
1841                        cfs_hash_bd_move_locked(hs, old, &new, hnode);
1842                        c++;
1843                }
1844        }
1845
1846        return c;
1847}
1848
1849static int
1850cfs_hash_rehash_worker(cfs_workitem_t *wi)
1851{
1852        struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_rehash_wi);
1853        struct cfs_hash_bucket **bkts;
1854        struct cfs_hash_bd bd;
1855        unsigned int old_size;
1856        unsigned int new_size;
1857        int bsize;
1858        int count = 0;
1859        int rc = 0;
1860        int i;
1861
1862        LASSERT(hs && cfs_hash_with_rehash(hs));
1863
1864        cfs_hash_lock(hs, 0);
1865        LASSERT(cfs_hash_is_rehashing(hs));
1866
1867        old_size = CFS_HASH_NBKT(hs);
1868        new_size = CFS_HASH_RH_NBKT(hs);
1869
1870        cfs_hash_unlock(hs, 0);
1871
1872        /*
1873         * don't need hs::hs_rwlock for hs::hs_buckets,
1874         * because nobody can change bkt-table except me.
1875         */
1876        bkts = cfs_hash_buckets_realloc(hs, hs->hs_buckets,
1877                                        old_size, new_size);
1878        cfs_hash_lock(hs, 1);
1879        if (!bkts) {
1880                rc = -ENOMEM;
1881                goto out;
1882        }
1883
1884        if (bkts == hs->hs_buckets) {
1885                bkts = NULL; /* do nothing */
1886                goto out;
1887        }
1888
1889        rc = __cfs_hash_theta(hs);
1890        if ((rc >= hs->hs_min_theta) && (rc <= hs->hs_max_theta)) {
1891                /* free the newly allocated bkt-table */
1892                old_size = new_size;
1893                new_size = CFS_HASH_NBKT(hs);
1894                rc = -EALREADY;
1895                goto out;
1896        }
1897
1898        LASSERT(!hs->hs_rehash_buckets);
1899        hs->hs_rehash_buckets = bkts;
1900
1901        rc = 0;
1902        cfs_hash_for_each_bucket(hs, &bd, i) {
1903                if (cfs_hash_is_exiting(hs)) {
1904                        rc = -ESRCH;
1905                        /* someone wants to destroy the hash, abort now */
1906                        if (old_size < new_size) /* OK to free old bkt-table */
1907                                break;
1908                        /* it's shrinking, need to free the new bkt-table */
1909                        hs->hs_rehash_buckets = NULL;
1910                        old_size = new_size;
1911                        new_size = CFS_HASH_NBKT(hs);
1912                        goto out;
1913                }
1914
1915                count += cfs_hash_rehash_bd(hs, &bd);
1916                if (count < CFS_HASH_LOOP_HOG ||
1917                    cfs_hash_is_iterating(hs)) { /* need to finish ASAP */
1918                        continue;
1919                }
1920
1921                count = 0;
1922                cfs_hash_unlock(hs, 1);
1923                cond_resched();
1924                cfs_hash_lock(hs, 1);
1925        }
1926
1927        hs->hs_rehash_count++;
1928
1929        bkts = hs->hs_buckets;
1930        hs->hs_buckets = hs->hs_rehash_buckets;
1931        hs->hs_rehash_buckets = NULL;
1932
1933        hs->hs_cur_bits = hs->hs_rehash_bits;
1934out:
1935        hs->hs_rehash_bits = 0;
1936        if (rc == -ESRCH) /* never be scheduled again */
1937                cfs_wi_exit(cfs_sched_rehash, wi);
1938        bsize = cfs_hash_bkt_size(hs);
1939        cfs_hash_unlock(hs, 1);
1940        /* can't refer to @hs anymore because it could be destroyed */
1941        if (bkts)
1942                cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
1943        if (rc != 0)
1944                CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
1945        /* return 1 only if cfs_wi_exit is called */
1946        return rc == -ESRCH;
1947}
1948
1949/**
1950 * Rehash the object referenced by @hnode in the libcfs hash @hs.  The
1951 * @old_key must be provided to locate the object's previous location
1952 * in the hash, and the @new_key will be used to reinsert the object.
1953 * Use this function instead of a cfs_hash_add() + cfs_hash_del()
1954 * combo when it is critical that there is no window in time where the
1955 * object is missing from the hash.  When an object is being rehashed
1956 * the registered cfs_hash_get() and cfs_hash_put() functions will
1957 * not be called.
1958 */
1959void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key,
1960                         void *new_key, struct hlist_node *hnode)
1961{
1962        struct cfs_hash_bd bds[3];
1963        struct cfs_hash_bd old_bds[2];
1964        struct cfs_hash_bd new_bd;
1965
1966        LASSERT(!hlist_unhashed(hnode));
1967
1968        cfs_hash_lock(hs, 0);
1969
1970        cfs_hash_dual_bd_get(hs, old_key, old_bds);
1971        cfs_hash_bd_get(hs, new_key, &new_bd);
1972
1973        bds[0] = old_bds[0];
1974        bds[1] = old_bds[1];
1975        bds[2] = new_bd;
1976
1977        /* NB: bds[0] and bds[1] are ordered already */
1978        cfs_hash_bd_order(&bds[1], &bds[2]);
1979        cfs_hash_bd_order(&bds[0], &bds[1]);
1980
1981        cfs_hash_multi_bd_lock(hs, bds, 3, 1);
1982        if (likely(!old_bds[1].bd_bucket)) {
1983                cfs_hash_bd_move_locked(hs, &old_bds[0], &new_bd, hnode);
1984        } else {
1985                cfs_hash_dual_bd_finddel_locked(hs, old_bds, old_key, hnode);
1986                cfs_hash_bd_add_locked(hs, &new_bd, hnode);
1987        }
1988        /* overwrite the key while the locks are held, otherwise this may
1989         * race with other operations, e.g. rehash
1990         */
1991        cfs_hash_keycpy(hs, hnode, new_key);
1992
1993        cfs_hash_multi_bd_unlock(hs, bds, 3, 1);
1994        cfs_hash_unlock(hs, 0);
1995}
1996EXPORT_SYMBOL(cfs_hash_rehash_key);
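
/*
 * Sketch for cfs_hash_rehash_key(); "struct my_obj", "mo_hnode" and "mo_id"
 * are hypothetical.  The move is done with all affected bucket locks held
 * and the new key is copied via cfs_hash_keycpy(), so the object is never
 * absent from the table:
 *
 *        __u64 old_id = obj->mo_id;
 *
 *        cfs_hash_rehash_key(my_hash, &old_id, &new_id, &obj->mo_hnode);
 */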
1997
1998void cfs_hash_debug_header(struct seq_file *m)
1999{
2000        seq_printf(m, "%-*s   cur   min   max theta t-min t-max flags rehash   count  maxdep maxdepb distribution\n",
2001                   CFS_HASH_BIGNAME_LEN, "name");
2002}
2003EXPORT_SYMBOL(cfs_hash_debug_header);
2004
2005static struct cfs_hash_bucket **
2006cfs_hash_full_bkts(struct cfs_hash *hs)
2007{
2008        /* NB: caller should hold hs->hs_rwlock if REHASH is set */
2009        if (!hs->hs_rehash_buckets)
2010                return hs->hs_buckets;
2011
2012        LASSERT(hs->hs_rehash_bits != 0);
2013        return hs->hs_rehash_bits > hs->hs_cur_bits ?
2014               hs->hs_rehash_buckets : hs->hs_buckets;
2015}
2016
2017static unsigned int
2018cfs_hash_full_nbkt(struct cfs_hash *hs)
2019{
2020        /* NB: caller should hold hs->hs_rwlock if REHASH is set */
2021        if (!hs->hs_rehash_buckets)
2022                return CFS_HASH_NBKT(hs);
2023
2024        LASSERT(hs->hs_rehash_bits != 0);
2025        return hs->hs_rehash_bits > hs->hs_cur_bits ?
2026               CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs);
2027}
2028
2029void cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m)
2030{
2031        int dist[8] = { 0, };
2032        int maxdep = -1;
2033        int maxdepb = -1;
2034        int total = 0;
2035        int theta;
2036        int i;
2037
2038        cfs_hash_lock(hs, 0);
2039        theta = __cfs_hash_theta(hs);
2040
2041        seq_printf(m, "%-*s %5d %5d %5d %d.%03d %d.%03d %d.%03d  0x%02x %6d ",
2042                   CFS_HASH_BIGNAME_LEN, hs->hs_name,
2043                   1 << hs->hs_cur_bits, 1 << hs->hs_min_bits,
2044                   1 << hs->hs_max_bits,
2045                   __cfs_hash_theta_int(theta), __cfs_hash_theta_frac(theta),
2046                   __cfs_hash_theta_int(hs->hs_min_theta),
2047                   __cfs_hash_theta_frac(hs->hs_min_theta),
2048                   __cfs_hash_theta_int(hs->hs_max_theta),
2049                   __cfs_hash_theta_frac(hs->hs_max_theta),
2050                   hs->hs_flags, hs->hs_rehash_count);
2051
2052        /*
2053         * The distribution is a summary of the chained hash depth in
2054         * each of the libcfs hash buckets.  Each bucket's hsb_count is
2055         * divided by the hash theta value and used to generate a
2056         * histogram of the hash distribution.  A uniform hash will
2057         * result in all hash buckets being close to the average, thus
2058         * only the first few entries in the histogram will be non-zero.
2059         * If your hash function produces a non-uniform hash, this will
2060         * be observable as outlier buckets in the distribution histogram.
2061         *
2062         * Uniform hash distribution:           128/128/0/0/0/0/0/0
2063         * Non-Uniform hash distribution:       128/125/0/0/0/0/2/1
2064         */
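        /*
         * Worked example of the histogram indexing below (illustrative
         * numbers only): if hsb_count / max(theta, 1) == 2 then fls(2) == 2
         * and the bucket is accounted in dist[2]; an empty bucket yields
         * fls(0) == 0 and lands in dist[0]; anything deeper than dist[7]
         * is clamped into dist[7] by min().
         */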
2065        for (i = 0; i < cfs_hash_full_nbkt(hs); i++) {
2066                struct cfs_hash_bd bd;
2067
2068                bd.bd_bucket = cfs_hash_full_bkts(hs)[i];
2069                cfs_hash_bd_lock(hs, &bd, 0);
2070                if (maxdep < bd.bd_bucket->hsb_depmax) {
2071                        maxdep  = bd.bd_bucket->hsb_depmax;
2072                        maxdepb = ffz(~maxdep);
2073                }
2074                total += bd.bd_bucket->hsb_count;
2075                dist[min(fls(bd.bd_bucket->hsb_count / max(theta, 1)), 7)]++;
2076                cfs_hash_bd_unlock(hs, &bd, 0);
2077        }
2078
2079        seq_printf(m, "%7d %7d %7d ", total, maxdep, maxdepb);
2080        for (i = 0; i < 8; i++)
2081                seq_printf(m, "%d%c",  dist[i], (i == 7) ? '\n' : '/');
2082
2083        cfs_hash_unlock(hs, 0);
2084}
2085EXPORT_SYMBOL(cfs_hash_debug_str);
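
/*
 * Sketch: dump one table from a seq_file show method; "my_hash" and
 * "my_hash_seq_show" are hypothetical.  The header line is printed once,
 * followed by one summary line per hash table:
 *
 *        static int my_hash_seq_show(struct seq_file *m, void *v)
 *        {
 *                cfs_hash_debug_header(m);
 *                cfs_hash_debug_str(my_hash, m);
 *                return 0;
 *        }
 */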
2086