linux/drivers/staging/lustre/lustre/libcfs/hash.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/hash.c
 *
 * Implement a hash class for the hash process in the Lustre system.
 *
 * Author: YuZhangyong <yzy@clusterfs.com>
 *
 * 2008-08-15: Brian Behlendorf <behlendorf1@llnl.gov>
 * - Simplified API and improved documentation
 * - Added per-hash feature flags:
 *   * CFS_HASH_DEBUG additional validation
 *   * CFS_HASH_REHASH dynamic rehashing
 * - Added per-hash statistics
 * - General performance enhancements
 *
 * 2009-07-31: Liang Zhen <zhen.liang@sun.com>
 * - move all stuff to libcfs
 * - don't allow cur_bits != max_bits without setting of CFS_HASH_REHASH
 * - ignore hs_rwlock if without CFS_HASH_REHASH setting
 * - buckets are allocated one by one (instead of as contiguous memory)
 *   to avoid unnecessary cacheline conflicts
 *
 * 2010-03-01: Liang Zhen <zhen.liang@sun.com>
 * - "bucket" is a group of hlist_heads now, the user can specify bucket
 *   size by bkt_bits of cfs_hash_create(); all hlist_heads in a bucket
 *   share one lock to reduce memory overhead.
 *
 * - support lockless hash, caller will take care of locks:
 *   avoid lock overhead for hash tables that are already protected
 *   by locking in the caller for another reason
 *
 * - support both spin_lock/rwlock for bucket:
 *   overhead of spinlock contention is lower than read/write
 *   contention of rwlock, so using a spinlock to serialize operations on
 *   a bucket is more reasonable for frequently changed hash tables
 *
 * - support one-single lock mode:
 *   one lock to protect all hash operations, to avoid the overhead of
 *   multiple locks if the hash table is always small
 *
 * - removed a lot of unnecessary addref & decref on hash elements:
 *   addref & decref are atomic operations in many use-cases, which
 *   are expensive.
 *
 * - support non-blocking cfs_hash_add() and cfs_hash_findadd():
 *   some Lustre use-cases require these functions to be strictly
 *   non-blocking, so we need to schedule any required rehash on a
 *   different thread in those cases.
 *
 * - safer rehash on large hash tables
 *   In the old implementation, the rehash function would exclusively
 *   lock the hash table and finish the rehash in one batch; that is
 *   dangerous on an SMP system because rehashing millions of elements
 *   can take a long time.  The new rehash implementation can release
 *   the lock and relax the CPU in the middle of a rehash, so it is safe
 *   for another thread to search/change the hash table even while it
 *   is rehashing.
 *
 * - support two different refcount modes
 *   . hash table holds a refcount on each element
 *   . hash table doesn't change refcounts on adding/removing elements
 *
 * - support long-name hash tables (for param-tree)
 *
 * - fix a bug in cfs_hash_rehash_key:
 *   in the old implementation, cfs_hash_rehash_key could corrupt the
 *   hash table because @key was overwritten without any protection.
 *   Now the user must define hs_keycpy for rehash-enabled hash tables;
 *   cfs_hash_rehash_key overwrites the hash key under the lock by
 *   calling hs_keycpy.
 *
 * - better hash iteration:
 *   We now support both locked iteration & lockless iteration of the
 *   hash table.  Also, the user can break the iteration by returning 1
 *   from the callback.
 */
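
/*
 * A minimal sketch of the callback table every user of this API must
 * supply (the "my_*" names are hypothetical, for illustration only;
 * see the LASSERT()s in cfs_hash_create() for which hooks are
 * mandatory):
 *
 *      static void *my_key(struct hlist_node *hnode)
 *      {
 *              return &container_of(hnode, struct my_obj, mo_hnode)->mo_key;
 *      }
 *
 *      static cfs_hash_ops_t my_ops = {
 *              .hs_hash        = my_hash,      (key -> index in [0, mask])
 *              .hs_key         = my_key,       (hnode -> key)
 *              .hs_keycmp      = my_keycmp,    (does key match hnode?)
 *              .hs_object      = my_object,    (hnode -> object)
 *              .hs_get         = my_get,       (take a reference)
 *              .hs_put_locked  = my_put,       (drop a reference)
 *      };
 */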

#include <linux/libcfs/libcfs.h>
#include <linux/seq_file.h>

#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
static unsigned int warn_on_depth = 8;
CFS_MODULE_PARM(warn_on_depth, "i", uint, 0644,
                "warning when hash depth is high.");
#endif

struct cfs_wi_sched *cfs_sched_rehash;

static inline void
cfs_hash_nl_lock(cfs_hash_lock_t *lock, int exclusive) {}

static inline void
cfs_hash_nl_unlock(cfs_hash_lock_t *lock, int exclusive) {}

static inline void
cfs_hash_spin_lock(cfs_hash_lock_t *lock, int exclusive)
{
        spin_lock(&lock->spin);
}

static inline void
cfs_hash_spin_unlock(cfs_hash_lock_t *lock, int exclusive)
{
        spin_unlock(&lock->spin);
}

static inline void
cfs_hash_rw_lock(cfs_hash_lock_t *lock, int exclusive)
{
        if (!exclusive)
                read_lock(&lock->rw);
        else
                write_lock(&lock->rw);
}

static inline void
cfs_hash_rw_unlock(cfs_hash_lock_t *lock, int exclusive)
{
        if (!exclusive)
                read_unlock(&lock->rw);
        else
                write_unlock(&lock->rw);
}

/** No lock hash */
static cfs_hash_lock_ops_t cfs_hash_nl_lops = {
        .hs_lock        = cfs_hash_nl_lock,
        .hs_unlock      = cfs_hash_nl_unlock,
        .hs_bkt_lock    = cfs_hash_nl_lock,
        .hs_bkt_unlock  = cfs_hash_nl_unlock,
};

/** no bucket lock, one spinlock to protect everything */
static cfs_hash_lock_ops_t cfs_hash_nbl_lops = {
        .hs_lock        = cfs_hash_spin_lock,
        .hs_unlock      = cfs_hash_spin_unlock,
        .hs_bkt_lock    = cfs_hash_nl_lock,
        .hs_bkt_unlock  = cfs_hash_nl_unlock,
};

/** spin bucket lock, rehash is enabled */
static cfs_hash_lock_ops_t cfs_hash_bkt_spin_lops = {
        .hs_lock        = cfs_hash_rw_lock,
        .hs_unlock      = cfs_hash_rw_unlock,
        .hs_bkt_lock    = cfs_hash_spin_lock,
        .hs_bkt_unlock  = cfs_hash_spin_unlock,
};

/** rw bucket lock, rehash is enabled */
static cfs_hash_lock_ops_t cfs_hash_bkt_rw_lops = {
        .hs_lock        = cfs_hash_rw_lock,
        .hs_unlock      = cfs_hash_rw_unlock,
        .hs_bkt_lock    = cfs_hash_rw_lock,
        .hs_bkt_unlock  = cfs_hash_rw_unlock,
};

/** spin bucket lock, rehash is disabled */
static cfs_hash_lock_ops_t cfs_hash_nr_bkt_spin_lops = {
        .hs_lock        = cfs_hash_nl_lock,
        .hs_unlock      = cfs_hash_nl_unlock,
        .hs_bkt_lock    = cfs_hash_spin_lock,
        .hs_bkt_unlock  = cfs_hash_spin_unlock,
};

/** rw bucket lock, rehash is disabled */
static cfs_hash_lock_ops_t cfs_hash_nr_bkt_rw_lops = {
        .hs_lock        = cfs_hash_nl_lock,
        .hs_unlock      = cfs_hash_nl_unlock,
        .hs_bkt_lock    = cfs_hash_rw_lock,
        .hs_bkt_unlock  = cfs_hash_rw_unlock,
};

static void
cfs_hash_lock_setup(cfs_hash_t *hs)
{
        if (cfs_hash_with_no_lock(hs)) {
                hs->hs_lops = &cfs_hash_nl_lops;

        } else if (cfs_hash_with_no_bktlock(hs)) {
                hs->hs_lops = &cfs_hash_nbl_lops;
                spin_lock_init(&hs->hs_lock.spin);

        } else if (cfs_hash_with_rehash(hs)) {
                rwlock_init(&hs->hs_lock.rw);

                if (cfs_hash_with_rw_bktlock(hs))
                        hs->hs_lops = &cfs_hash_bkt_rw_lops;
                else if (cfs_hash_with_spin_bktlock(hs))
                        hs->hs_lops = &cfs_hash_bkt_spin_lops;
                else
                        LBUG();
        } else {
                if (cfs_hash_with_rw_bktlock(hs))
                        hs->hs_lops = &cfs_hash_nr_bkt_rw_lops;
                else if (cfs_hash_with_spin_bktlock(hs))
                        hs->hs_lops = &cfs_hash_nr_bkt_spin_lops;
                else
                        LBUG();
        }
}

/**
 * Simple hash head without depth tracking;
 * new elements are always added to the head of the hlist.
 */
typedef struct {
        struct hlist_head       hh_head;        /**< entries list */
} cfs_hash_head_t;

static int
cfs_hash_hh_hhead_size(cfs_hash_t *hs)
{
        return sizeof(cfs_hash_head_t);
}

static struct hlist_head *
cfs_hash_hh_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
{
        cfs_hash_head_t *head = (cfs_hash_head_t *)&bd->bd_bucket->hsb_head[0];

        return &head[bd->bd_offset].hh_head;
}

static int
cfs_hash_hh_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      struct hlist_node *hnode)
{
        hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd));
        return -1; /* unknown depth */
}

static int
cfs_hash_hh_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      struct hlist_node *hnode)
{
        hlist_del_init(hnode);
        return -1; /* unknown depth */
}

/**
 * Simple hash head with depth tracking;
 * new elements are always added to the head of the hlist.
 */
typedef struct {
        struct hlist_head       hd_head;        /**< entries list */
        unsigned int            hd_depth;       /**< list length */
} cfs_hash_head_dep_t;

static int
cfs_hash_hd_hhead_size(cfs_hash_t *hs)
{
        return sizeof(cfs_hash_head_dep_t);
}

static struct hlist_head *
cfs_hash_hd_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
{
        cfs_hash_head_dep_t *head;

        head = (cfs_hash_head_dep_t *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].hd_head;
}

static int
cfs_hash_hd_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      struct hlist_node *hnode)
{
        cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
                                               cfs_hash_head_dep_t, hd_head);
        hlist_add_head(hnode, &hh->hd_head);
        return ++hh->hd_depth;
}

static int
cfs_hash_hd_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      struct hlist_node *hnode)
{
        cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
                                               cfs_hash_head_dep_t, hd_head);
        hlist_del_init(hnode);
        return --hh->hd_depth;
}

/**
 * Doubly-linked hash head without depth tracking;
 * new elements are always added to the tail of the hlist.
 */
typedef struct {
        struct hlist_head       dh_head;        /**< entries list */
        struct hlist_node       *dh_tail;       /**< the last entry */
} cfs_hash_dhead_t;

static int
cfs_hash_dh_hhead_size(cfs_hash_t *hs)
{
        return sizeof(cfs_hash_dhead_t);
}

static struct hlist_head *
cfs_hash_dh_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
{
        cfs_hash_dhead_t *head;

        head = (cfs_hash_dhead_t *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].dh_head;
}

static int
cfs_hash_dh_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      struct hlist_node *hnode)
{
        cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
                                            cfs_hash_dhead_t, dh_head);

        if (dh->dh_tail != NULL) /* not empty */
                hlist_add_after(dh->dh_tail, hnode);
        else /* empty list */
                hlist_add_head(hnode, &dh->dh_head);
        dh->dh_tail = hnode;
        return -1; /* unknown depth */
}

static int
cfs_hash_dh_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      struct hlist_node *hnd)
{
        cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
                                            cfs_hash_dhead_t, dh_head);

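        /*
         * Keep dh_tail accurate: if @hnd is the last entry, the new tail
         * is its predecessor, recovered from hnd->pprev via container_of();
         * it becomes NULL when the list ends up empty.
         */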
        if (hnd->next == NULL) { /* it's the tail */
                dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL :
                              container_of(hnd->pprev, struct hlist_node, next);
        }
        hlist_del_init(hnd);
        return -1; /* unknown depth */
}

/**
 * Doubly-linked hash head with depth tracking;
 * new elements are always added to the tail of the hlist.
 */
typedef struct {
        struct hlist_head       dd_head;        /**< entries list */
        struct hlist_node       *dd_tail;       /**< the last entry */
        unsigned int            dd_depth;       /**< list length */
} cfs_hash_dhead_dep_t;

static int
cfs_hash_dd_hhead_size(cfs_hash_t *hs)
{
        return sizeof(cfs_hash_dhead_dep_t);
}

static struct hlist_head *
cfs_hash_dd_hhead(cfs_hash_t *hs, cfs_hash_bd_t *bd)
{
        cfs_hash_dhead_dep_t *head;

        head = (cfs_hash_dhead_dep_t *)&bd->bd_bucket->hsb_head[0];
        return &head[bd->bd_offset].dd_head;
}

static int
cfs_hash_dd_hnode_add(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      struct hlist_node *hnode)
{
        cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
                                                cfs_hash_dhead_dep_t, dd_head);

        if (dh->dd_tail != NULL) /* not empty */
                hlist_add_after(dh->dd_tail, hnode);
        else /* empty list */
                hlist_add_head(hnode, &dh->dd_head);
        dh->dd_tail = hnode;
        return ++dh->dd_depth;
}

static int
cfs_hash_dd_hnode_del(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                      struct hlist_node *hnd)
{
        cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
                                                cfs_hash_dhead_dep_t, dd_head);

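        /* same dd_tail maintenance as in cfs_hash_dh_hnode_del() above */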
        if (hnd->next == NULL) { /* it's the tail */
                dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL :
                              container_of(hnd->pprev, struct hlist_node, next);
        }
        hlist_del_init(hnd);
        return --dh->dd_depth;
}

static cfs_hash_hlist_ops_t cfs_hash_hh_hops = {
        .hop_hhead      = cfs_hash_hh_hhead,
        .hop_hhead_size = cfs_hash_hh_hhead_size,
        .hop_hnode_add  = cfs_hash_hh_hnode_add,
        .hop_hnode_del  = cfs_hash_hh_hnode_del,
};

static cfs_hash_hlist_ops_t cfs_hash_hd_hops = {
        .hop_hhead      = cfs_hash_hd_hhead,
        .hop_hhead_size = cfs_hash_hd_hhead_size,
        .hop_hnode_add  = cfs_hash_hd_hnode_add,
        .hop_hnode_del  = cfs_hash_hd_hnode_del,
};

static cfs_hash_hlist_ops_t cfs_hash_dh_hops = {
        .hop_hhead      = cfs_hash_dh_hhead,
        .hop_hhead_size = cfs_hash_dh_hhead_size,
        .hop_hnode_add  = cfs_hash_dh_hnode_add,
        .hop_hnode_del  = cfs_hash_dh_hnode_del,
};

static cfs_hash_hlist_ops_t cfs_hash_dd_hops = {
        .hop_hhead      = cfs_hash_dd_hhead,
        .hop_hhead_size = cfs_hash_dd_hhead_size,
        .hop_hnode_add  = cfs_hash_dd_hnode_add,
        .hop_hnode_del  = cfs_hash_dd_hnode_del,
};

static void
cfs_hash_hlist_setup(cfs_hash_t *hs)
{
        if (cfs_hash_with_add_tail(hs)) {
                hs->hs_hops = cfs_hash_with_depth(hs) ?
                              &cfs_hash_dd_hops : &cfs_hash_dh_hops;
        } else {
                hs->hs_hops = cfs_hash_with_depth(hs) ?
                              &cfs_hash_hd_hops : &cfs_hash_hh_hops;
        }
}

static void
cfs_hash_bd_from_key(cfs_hash_t *hs, cfs_hash_bucket_t **bkts,
                     unsigned int bits, const void *key, cfs_hash_bd_t *bd)
{
        unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1);

        LASSERT(bits == hs->hs_cur_bits || bits == hs->hs_rehash_bits);

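        /*
         * Split the hash index in two: the low (bits - bkt_bits) bits
         * select the bucket, the remaining high bits select the hlist
         * head inside that bucket.
         */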
        bd->bd_bucket = bkts[index & ((1U << (bits - hs->hs_bkt_bits)) - 1)];
        bd->bd_offset = index >> (bits - hs->hs_bkt_bits);
}

void
cfs_hash_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bd)
{
        /* NB: caller should hold hs->hs_rwlock if REHASH is set */
        if (likely(hs->hs_rehash_buckets == NULL)) {
                cfs_hash_bd_from_key(hs, hs->hs_buckets,
                                     hs->hs_cur_bits, key, bd);
        } else {
                LASSERT(hs->hs_rehash_bits != 0);
                cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
                                     hs->hs_rehash_bits, key, bd);
        }
}
EXPORT_SYMBOL(cfs_hash_bd_get);

static inline void
cfs_hash_bd_dep_record(cfs_hash_t *hs, cfs_hash_bd_t *bd, int dep_cur)
{
        if (likely(dep_cur <= bd->bd_bucket->hsb_depmax))
                return;

        bd->bd_bucket->hsb_depmax = dep_cur;
# if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
        if (likely(warn_on_depth == 0 ||
                   max(warn_on_depth, hs->hs_dep_max) >= dep_cur))
                return;

        spin_lock(&hs->hs_dep_lock);
        hs->hs_dep_max  = dep_cur;
        hs->hs_dep_bkt  = bd->bd_bucket->hsb_index;
        hs->hs_dep_off  = bd->bd_offset;
        hs->hs_dep_bits = hs->hs_cur_bits;
        spin_unlock(&hs->hs_dep_lock);

        cfs_wi_schedule(cfs_sched_rehash, &hs->hs_dep_wi);
# endif
}

void
cfs_hash_bd_add_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                       struct hlist_node *hnode)
{
        int rc;

        rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode);
        cfs_hash_bd_dep_record(hs, bd, rc);
        bd->bd_bucket->hsb_version++;
        if (unlikely(bd->bd_bucket->hsb_version == 0))
                bd->bd_bucket->hsb_version++;
        bd->bd_bucket->hsb_count++;

        if (cfs_hash_with_counter(hs))
                atomic_inc(&hs->hs_count);
        if (!cfs_hash_with_no_itemref(hs))
                cfs_hash_get(hs, hnode);
}
EXPORT_SYMBOL(cfs_hash_bd_add_locked);

void
cfs_hash_bd_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                       struct hlist_node *hnode)
{
        hs->hs_hops->hop_hnode_del(hs, bd, hnode);

        LASSERT(bd->bd_bucket->hsb_count > 0);
        bd->bd_bucket->hsb_count--;
        bd->bd_bucket->hsb_version++;
        if (unlikely(bd->bd_bucket->hsb_version == 0))
                bd->bd_bucket->hsb_version++;

        if (cfs_hash_with_counter(hs)) {
                LASSERT(atomic_read(&hs->hs_count) > 0);
                atomic_dec(&hs->hs_count);
        }
        if (!cfs_hash_with_no_itemref(hs))
                cfs_hash_put_locked(hs, hnode);
}
EXPORT_SYMBOL(cfs_hash_bd_del_locked);

void
cfs_hash_bd_move_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd_old,
                        cfs_hash_bd_t *bd_new, struct hlist_node *hnode)
{
        cfs_hash_bucket_t *obkt = bd_old->bd_bucket;
        cfs_hash_bucket_t *nbkt = bd_new->bd_bucket;
        int rc;

        if (cfs_hash_bd_compare(bd_old, bd_new) == 0)
                return;

        /* use hop_hnode_add/del directly, to avoid the atomic & refcount
         * ops in cfs_hash_bd_del/add_locked */
        hs->hs_hops->hop_hnode_del(hs, bd_old, hnode);
        rc = hs->hs_hops->hop_hnode_add(hs, bd_new, hnode);
        cfs_hash_bd_dep_record(hs, bd_new, rc);

        LASSERT(obkt->hsb_count > 0);
        obkt->hsb_count--;
        obkt->hsb_version++;
        if (unlikely(obkt->hsb_version == 0))
                obkt->hsb_version++;
        nbkt->hsb_count++;
        nbkt->hsb_version++;
        if (unlikely(nbkt->hsb_version == 0))
                nbkt->hsb_version++;
}
EXPORT_SYMBOL(cfs_hash_bd_move_locked);

enum {
        /** always set, for sanity (avoid ZERO intent) */
        CFS_HS_LOOKUP_MASK_FIND     = 1 << 0,
        /** return entry with a ref */
        CFS_HS_LOOKUP_MASK_REF      = 1 << 1,
        /** add entry if not existing */
        CFS_HS_LOOKUP_MASK_ADD      = 1 << 2,
        /** delete entry, ignore other masks */
        CFS_HS_LOOKUP_MASK_DEL      = 1 << 3,
};

typedef enum cfs_hash_lookup_intent {
        /** return item w/o refcount */
        CFS_HS_LOOKUP_IT_PEEK       = CFS_HS_LOOKUP_MASK_FIND,
        /** return item with refcount */
        CFS_HS_LOOKUP_IT_FIND       = (CFS_HS_LOOKUP_MASK_FIND |
                                       CFS_HS_LOOKUP_MASK_REF),
        /** return item w/o refcount if it exists, otherwise add */
        CFS_HS_LOOKUP_IT_ADD        = (CFS_HS_LOOKUP_MASK_FIND |
                                       CFS_HS_LOOKUP_MASK_ADD),
        /** return item with refcount if it exists, otherwise add */
        CFS_HS_LOOKUP_IT_FINDADD    = (CFS_HS_LOOKUP_IT_FIND |
                                       CFS_HS_LOOKUP_MASK_ADD),
        /** delete the item if it exists */
        CFS_HS_LOOKUP_IT_FINDDEL    = (CFS_HS_LOOKUP_MASK_FIND |
                                       CFS_HS_LOOKUP_MASK_DEL)
} cfs_hash_lookup_intent_t;

static struct hlist_node *
cfs_hash_bd_lookup_intent(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                          const void *key, struct hlist_node *hnode,
                          cfs_hash_lookup_intent_t intent)
{
        struct hlist_head *hhead = cfs_hash_bd_hhead(hs, bd);
        struct hlist_node *ehnode;
        struct hlist_node *match;
        int intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0;

        /* with this function we can avoid a lot of useless refcount ops,
         * which are expensive atomic operations most of the time */
        match = intent_add ? NULL : hnode;
        hlist_for_each(ehnode, hhead) {
                if (!cfs_hash_keycmp(hs, key, ehnode))
                        continue;

                if (match != NULL && match != ehnode) /* can't match */
                        continue;

                /* match and ... */
                if ((intent & CFS_HS_LOOKUP_MASK_DEL) != 0) {
                        cfs_hash_bd_del_locked(hs, bd, ehnode);
                        return ehnode;
                }

                /* caller wants refcount? */
                if ((intent & CFS_HS_LOOKUP_MASK_REF) != 0)
                        cfs_hash_get(hs, ehnode);
                return ehnode;
        }
        /* no matching item */
        if (!intent_add)
                return NULL;

        LASSERT(hnode != NULL);
        cfs_hash_bd_add_locked(hs, bd, hnode);
        return hnode;
}

struct hlist_node *
cfs_hash_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, const void *key)
{
        return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
                                         CFS_HS_LOOKUP_IT_FIND);
}
EXPORT_SYMBOL(cfs_hash_bd_lookup_locked);

struct hlist_node *
cfs_hash_bd_peek_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd, const void *key)
{
        return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
                                         CFS_HS_LOOKUP_IT_PEEK);
}
EXPORT_SYMBOL(cfs_hash_bd_peek_locked);

struct hlist_node *
cfs_hash_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                           const void *key, struct hlist_node *hnode,
                           int noref)
{
        return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
                                         CFS_HS_LOOKUP_IT_ADD |
                                         (!noref * CFS_HS_LOOKUP_MASK_REF));
}
EXPORT_SYMBOL(cfs_hash_bd_findadd_locked);

struct hlist_node *
cfs_hash_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                           const void *key, struct hlist_node *hnode)
{
        /* hnode can be NULL, we find the first item with @key */
        return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
                                         CFS_HS_LOOKUP_IT_FINDDEL);
}
EXPORT_SYMBOL(cfs_hash_bd_finddel_locked);

static void
cfs_hash_multi_bd_lock(cfs_hash_t *hs, cfs_hash_bd_t *bds,
                       unsigned n, int excl)
{
        cfs_hash_bucket_t *prev = NULL;
        int i;

        /**
         * bds must be ordered by ascending bd->bd_bucket->hsb_index.
         * NB: it's possible that several bds point to the same bucket but
         * have different bd::bd_offset, so we need to take care to avoid
         * deadlock.
         */
        cfs_hash_for_each_bd(bds, n, i) {
                if (prev == bds[i].bd_bucket)
                        continue;

                LASSERT(prev == NULL ||
                        prev->hsb_index < bds[i].bd_bucket->hsb_index);
                cfs_hash_bd_lock(hs, &bds[i], excl);
                prev = bds[i].bd_bucket;
        }
}

static void
cfs_hash_multi_bd_unlock(cfs_hash_t *hs, cfs_hash_bd_t *bds,
                         unsigned n, int excl)
{
        cfs_hash_bucket_t *prev = NULL;
        int i;

        cfs_hash_for_each_bd(bds, n, i) {
                if (prev != bds[i].bd_bucket) {
                        cfs_hash_bd_unlock(hs, &bds[i], excl);
                        prev = bds[i].bd_bucket;
                }
        }
}

static struct hlist_node *
cfs_hash_multi_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
                                unsigned n, const void *key)
{
        struct hlist_node *ehnode;
        unsigned i;

        cfs_hash_for_each_bd(bds, n, i) {
                ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL,
                                                   CFS_HS_LOOKUP_IT_FIND);
                if (ehnode != NULL)
                        return ehnode;
        }
        return NULL;
}

static struct hlist_node *
cfs_hash_multi_bd_findadd_locked(cfs_hash_t *hs,
                                 cfs_hash_bd_t *bds, unsigned n, const void *key,
                                 struct hlist_node *hnode, int noref)
{
        struct hlist_node *ehnode;
        int intent;
        unsigned i;

        LASSERT(hnode != NULL);
        intent = CFS_HS_LOOKUP_IT_PEEK | (!noref * CFS_HS_LOOKUP_MASK_REF);

        cfs_hash_for_each_bd(bds, n, i) {
                ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key,
                                                   NULL, intent);
                if (ehnode != NULL)
                        return ehnode;
        }

        if (i == 1) { /* only one bucket */
                cfs_hash_bd_add_locked(hs, &bds[0], hnode);
        } else {
                cfs_hash_bd_t mybd;

                cfs_hash_bd_get(hs, key, &mybd);
                cfs_hash_bd_add_locked(hs, &mybd, hnode);
        }

        return hnode;
}

static struct hlist_node *
cfs_hash_multi_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
                                 unsigned n, const void *key,
                                 struct hlist_node *hnode)
{
        struct hlist_node *ehnode;
        unsigned i;

        cfs_hash_for_each_bd(bds, n, i) {
                ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode,
                                                   CFS_HS_LOOKUP_IT_FINDDEL);
                if (ehnode != NULL)
                        return ehnode;
        }
        return NULL;
}

static void
cfs_hash_bd_order(cfs_hash_bd_t *bd1, cfs_hash_bd_t *bd2)
{
        int rc;

        if (bd2->bd_bucket == NULL)
                return;

        if (bd1->bd_bucket == NULL) {
                *bd1 = *bd2;
                bd2->bd_bucket = NULL;
                return;
        }

        rc = cfs_hash_bd_compare(bd1, bd2);
        if (rc == 0) {
                bd2->bd_bucket = NULL;

        } else if (rc > 0) { /* swap bd1 and bd2 */
                cfs_hash_bd_t tmp;

                tmp = *bd2;
                *bd2 = *bd1;
                *bd1 = tmp;
        }
}

void
cfs_hash_dual_bd_get(cfs_hash_t *hs, const void *key, cfs_hash_bd_t *bds)
{
        /* NB: caller should hold hs_lock.rw if REHASH is set */
        cfs_hash_bd_from_key(hs, hs->hs_buckets,
                             hs->hs_cur_bits, key, &bds[0]);
        if (likely(hs->hs_rehash_buckets == NULL)) {
                /* no rehash or not rehashing */
                bds[1].bd_bucket = NULL;
                return;
        }

        LASSERT(hs->hs_rehash_bits != 0);
        cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
                             hs->hs_rehash_bits, key, &bds[1]);

        cfs_hash_bd_order(&bds[0], &bds[1]);
}
EXPORT_SYMBOL(cfs_hash_dual_bd_get);

void
cfs_hash_dual_bd_lock(cfs_hash_t *hs, cfs_hash_bd_t *bds, int excl)
{
        cfs_hash_multi_bd_lock(hs, bds, 2, excl);
}
EXPORT_SYMBOL(cfs_hash_dual_bd_lock);

void
cfs_hash_dual_bd_unlock(cfs_hash_t *hs, cfs_hash_bd_t *bds, int excl)
{
        cfs_hash_multi_bd_unlock(hs, bds, 2, excl);
}
EXPORT_SYMBOL(cfs_hash_dual_bd_unlock);

struct hlist_node *
cfs_hash_dual_bd_lookup_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
                               const void *key)
{
        return cfs_hash_multi_bd_lookup_locked(hs, bds, 2, key);
}
EXPORT_SYMBOL(cfs_hash_dual_bd_lookup_locked);

struct hlist_node *
cfs_hash_dual_bd_findadd_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
                                const void *key, struct hlist_node *hnode,
                                int noref)
{
        return cfs_hash_multi_bd_findadd_locked(hs, bds, 2, key,
                                                hnode, noref);
}
EXPORT_SYMBOL(cfs_hash_dual_bd_findadd_locked);

struct hlist_node *
cfs_hash_dual_bd_finddel_locked(cfs_hash_t *hs, cfs_hash_bd_t *bds,
                                const void *key, struct hlist_node *hnode)
{
        return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode);
}
EXPORT_SYMBOL(cfs_hash_dual_bd_finddel_locked);

static void
cfs_hash_buckets_free(cfs_hash_bucket_t **buckets,
                      int bkt_size, int prev_size, int size)
{
        int i;

        for (i = prev_size; i < size; i++) {
                if (buckets[i] != NULL)
                        LIBCFS_FREE(buckets[i], bkt_size);
        }

        LIBCFS_FREE(buckets, sizeof(buckets[0]) * size);
}

/*
 * Create or grow bucket memory. Return old_buckets if no allocation was
 * needed, the newly allocated buckets if allocation was needed and
 * successful, and NULL on error.
 */
static cfs_hash_bucket_t **
cfs_hash_buckets_realloc(cfs_hash_t *hs, cfs_hash_bucket_t **old_bkts,
                         unsigned int old_size, unsigned int new_size)
{
        cfs_hash_bucket_t **new_bkts;
        int i;

        LASSERT(old_size == 0 || old_bkts != NULL);

        if (old_bkts != NULL && old_size == new_size)
                return old_bkts;

        LIBCFS_ALLOC(new_bkts, sizeof(new_bkts[0]) * new_size);
        if (new_bkts == NULL)
                return NULL;

        if (old_bkts != NULL) {
                memcpy(new_bkts, old_bkts,
                       min(old_size, new_size) * sizeof(*old_bkts));
        }

        for (i = old_size; i < new_size; i++) {
                struct hlist_head *hhead;
                cfs_hash_bd_t bd;

                LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs));
                if (new_bkts[i] == NULL) {
                        cfs_hash_buckets_free(new_bkts, cfs_hash_bkt_size(hs),
                                              old_size, new_size);
                        return NULL;
                }

                new_bkts[i]->hsb_index   = i;
                new_bkts[i]->hsb_version = 1;  /* shouldn't be zero */
                new_bkts[i]->hsb_depmax  = -1; /* unknown */
                bd.bd_bucket = new_bkts[i];
                cfs_hash_bd_for_each_hlist(hs, &bd, hhead)
                        INIT_HLIST_HEAD(hhead);

                if (cfs_hash_with_no_lock(hs) ||
                    cfs_hash_with_no_bktlock(hs))
                        continue;

                if (cfs_hash_with_rw_bktlock(hs))
                        rwlock_init(&new_bkts[i]->hsb_lock.rw);
                else if (cfs_hash_with_spin_bktlock(hs))
                        spin_lock_init(&new_bkts[i]->hsb_lock.spin);
                else
                        LBUG(); /* invalid use-case */
        }
        return new_bkts;
}

/**
 * Initialize a new libcfs hash, where:
 * @name     - Descriptive hash name
 * @cur_bits - Initial hash table size, in bits
 * @max_bits - Maximum allowed hash table resize, in bits
 * @ops      - Registered hash table operations
 * @flags    - CFS_HASH_REHASH enable dynamic hash resizing
 *           - CFS_HASH_SORT enable chained hash sort
 */
static int cfs_hash_rehash_worker(cfs_workitem_t *wi);

#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
static int cfs_hash_dep_print(cfs_workitem_t *wi)
{
        cfs_hash_t *hs = container_of(wi, cfs_hash_t, hs_dep_wi);
        int dep;
        int bkt;
        int off;
        int bits;

        spin_lock(&hs->hs_dep_lock);
        dep  = hs->hs_dep_max;
        bkt  = hs->hs_dep_bkt;
        off  = hs->hs_dep_off;
        bits = hs->hs_dep_bits;
        spin_unlock(&hs->hs_dep_lock);

        LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
                      hs->hs_name, bits, dep, bkt, off);
        spin_lock(&hs->hs_dep_lock);
        hs->hs_dep_bits = 0; /* mark as workitem done */
        spin_unlock(&hs->hs_dep_lock);
        return 0;
}

static void cfs_hash_depth_wi_init(cfs_hash_t *hs)
{
        spin_lock_init(&hs->hs_dep_lock);
        cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print);
}

static void cfs_hash_depth_wi_cancel(cfs_hash_t *hs)
{
        if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi))
                return;

        spin_lock(&hs->hs_dep_lock);
        while (hs->hs_dep_bits != 0) {
                spin_unlock(&hs->hs_dep_lock);
                cond_resched();
                spin_lock(&hs->hs_dep_lock);
        }
        spin_unlock(&hs->hs_dep_lock);
}

#else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */

static inline void cfs_hash_depth_wi_init(cfs_hash_t *hs) {}
static inline void cfs_hash_depth_wi_cancel(cfs_hash_t *hs) {}

#endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */

cfs_hash_t *
cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
                unsigned bkt_bits, unsigned extra_bytes,
                unsigned min_theta, unsigned max_theta,
                cfs_hash_ops_t *ops, unsigned flags)
{
        cfs_hash_t *hs;
        int len;

        ENTRY;

        CLASSERT(CFS_HASH_THETA_BITS < 15);

        LASSERT(name != NULL);
        LASSERT(ops != NULL);
        LASSERT(ops->hs_key);
        LASSERT(ops->hs_hash);
        LASSERT(ops->hs_object);
        LASSERT(ops->hs_keycmp);
        LASSERT(ops->hs_get != NULL);
        LASSERT(ops->hs_put_locked != NULL);

        if ((flags & CFS_HASH_REHASH) != 0)
                flags |= CFS_HASH_COUNTER; /* must have counter */

        LASSERT(cur_bits > 0);
        LASSERT(cur_bits >= bkt_bits);
        LASSERT(max_bits >= cur_bits && max_bits < 31);
        LASSERT(ergo((flags & CFS_HASH_REHASH) == 0, cur_bits == max_bits));
        LASSERT(ergo((flags & CFS_HASH_REHASH) != 0,
                     (flags & CFS_HASH_NO_LOCK) == 0));
        LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0,
                     ops->hs_keycpy != NULL));

        len = (flags & CFS_HASH_BIGNAME) == 0 ?
              CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
        LIBCFS_ALLOC(hs, offsetof(cfs_hash_t, hs_name[len]));
        if (hs == NULL)
                RETURN(NULL);

        strncpy(hs->hs_name, name, len);
        hs->hs_name[len - 1] = '\0';
        hs->hs_flags = flags;

        atomic_set(&hs->hs_refcount, 1);
        atomic_set(&hs->hs_count, 0);

        cfs_hash_lock_setup(hs);
        cfs_hash_hlist_setup(hs);

        hs->hs_cur_bits = (__u8)cur_bits;
        hs->hs_min_bits = (__u8)cur_bits;
        hs->hs_max_bits = (__u8)max_bits;
        hs->hs_bkt_bits = (__u8)bkt_bits;

        hs->hs_ops         = ops;
        hs->hs_extra_bytes = extra_bytes;
        hs->hs_rehash_bits = 0;
        cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker);
        cfs_hash_depth_wi_init(hs);

        if (cfs_hash_with_rehash(hs))
                __cfs_hash_set_theta(hs, min_theta, max_theta);

        hs->hs_buckets = cfs_hash_buckets_realloc(hs, NULL, 0,
                                                  CFS_HASH_NBKT(hs));
        if (hs->hs_buckets != NULL)
                return hs;

        LIBCFS_FREE(hs, offsetof(cfs_hash_t, hs_name[len]));
        RETURN(NULL);
}
EXPORT_SYMBOL(cfs_hash_create);
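
/*
 * Illustrative call (a sketch with made-up values, not taken from a
 * real caller; CFS_HASH_DEFAULT enables rw bucket locks, rehash and
 * the element counter):
 *
 *      hs = cfs_hash_create("my_table",
 *                           5,             (cur_bits: 2^5 hlist heads now)
 *                           10,            (max_bits: can grow to 2^10)
 *                           2,             (bkt_bits: 2^2 hlists per bucket)
 *                           0,             (no extra bytes per bucket)
 *                           CFS_HASH_MIN_THETA, CFS_HASH_MAX_THETA,
 *                           &my_ops, CFS_HASH_DEFAULT);
 *
 * The new table starts with one reference; cfs_hash_putref() drops it
 * and destroys the table once it is the last one.
 */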

/**
 * Cleanup libcfs hash @hs.
 */
static void
cfs_hash_destroy(cfs_hash_t *hs)
{
        struct hlist_node *hnode;
        struct hlist_node *pos;
        cfs_hash_bd_t bd;
        int i;
        ENTRY;

        LASSERT(hs != NULL);
        LASSERT(!cfs_hash_is_exiting(hs) &&
                !cfs_hash_is_iterating(hs));

        /**
         * prohibit further rehashes; no lock is needed because
         * I'm the only (last) one who can change it.
         */
        hs->hs_exiting = 1;
        if (cfs_hash_with_rehash(hs))
                cfs_hash_rehash_cancel(hs);

        cfs_hash_depth_wi_cancel(hs);
        /* rehash should be done/canceled */
        LASSERT(hs->hs_buckets != NULL &&
                hs->hs_rehash_buckets == NULL);

        cfs_hash_for_each_bucket(hs, &bd, i) {
                struct hlist_head *hhead;

                LASSERT(bd.bd_bucket != NULL);
                /* no need to take this lock, just for consistent code */
                cfs_hash_bd_lock(hs, &bd, 1);

                cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
                        hlist_for_each_safe(hnode, pos, hhead) {
                                LASSERTF(!cfs_hash_with_assert_empty(hs),
                                         "hash %s bucket %u(%u) is not empty: %u items left\n",
                                         hs->hs_name, bd.bd_bucket->hsb_index,
                                         bd.bd_offset, bd.bd_bucket->hsb_count);
                                /* can't validate the key here, because we
                                 * may have interrupted a rehash */
                                cfs_hash_bd_del_locked(hs, &bd, hnode);
                                cfs_hash_exit(hs, hnode);
                        }
                }
                LASSERT(bd.bd_bucket->hsb_count == 0);
                cfs_hash_bd_unlock(hs, &bd, 1);
                cond_resched();
        }

        LASSERT(atomic_read(&hs->hs_count) == 0);

        cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs),
                              0, CFS_HASH_NBKT(hs));
        i = cfs_hash_with_bigname(hs) ?
            CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
        LIBCFS_FREE(hs, offsetof(cfs_hash_t, hs_name[i]));

        EXIT;
}

cfs_hash_t *cfs_hash_getref(cfs_hash_t *hs)
{
        if (atomic_inc_not_zero(&hs->hs_refcount))
                return hs;
        return NULL;
}
EXPORT_SYMBOL(cfs_hash_getref);

void cfs_hash_putref(cfs_hash_t *hs)
{
        if (atomic_dec_and_test(&hs->hs_refcount))
                cfs_hash_destroy(hs);
}
EXPORT_SYMBOL(cfs_hash_putref);

static inline int
cfs_hash_rehash_bits(cfs_hash_t *hs)
{
        if (cfs_hash_with_no_lock(hs) ||
            !cfs_hash_with_rehash(hs))
                return -EOPNOTSUPP;

        if (unlikely(cfs_hash_is_exiting(hs)))
                return -ESRCH;

        if (unlikely(cfs_hash_is_rehashing(hs)))
                return -EALREADY;

        if (unlikely(cfs_hash_is_iterating(hs)))
                return -EAGAIN;

        /* XXX: need to handle case with max_theta != 2.0
         *      and the case with min_theta != 0.5 */
        if ((hs->hs_cur_bits < hs->hs_max_bits) &&
            (__cfs_hash_theta(hs) > hs->hs_max_theta))
                return hs->hs_cur_bits + 1;

        if (!cfs_hash_with_shrink(hs))
                return 0;

        if ((hs->hs_cur_bits > hs->hs_min_bits) &&
            (__cfs_hash_theta(hs) < hs->hs_min_theta))
                return hs->hs_cur_bits - 1;

        return 0;
}
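
/*
 * NB: __cfs_hash_theta() is (roughly) the average hlist depth,
 * hs_count / 2^hs_cur_bits, kept in fixed point with CFS_HASH_THETA_BITS
 * fractional bits: grow while above hs_max_theta, shrink (if enabled)
 * while below hs_min_theta.
 */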

/**
 * don't allow inline rehash if:
 * - user wants non-blocking change (add/del) on hash table
 * - too many elements
 */
static inline int
cfs_hash_rehash_inline(cfs_hash_t *hs)
{
        return !cfs_hash_with_nblk_change(hs) &&
               atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG;
}

/**
 * Add item @hnode to libcfs hash @hs using @key.  The registered
 * ops->hs_get function will be called when the item is added.
 */
void
cfs_hash_add(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
{
        cfs_hash_bd_t bd;
        int bits;

        LASSERT(hlist_unhashed(hnode));

        cfs_hash_lock(hs, 0);
        cfs_hash_bd_get_and_lock(hs, key, &bd, 1);

        cfs_hash_key_validate(hs, key, hnode);
        cfs_hash_bd_add_locked(hs, &bd, hnode);

        cfs_hash_bd_unlock(hs, &bd, 1);

        bits = cfs_hash_rehash_bits(hs);
        cfs_hash_unlock(hs, 0);
        if (bits > 0)
                cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
}
EXPORT_SYMBOL(cfs_hash_add);

static struct hlist_node *
cfs_hash_find_or_add(cfs_hash_t *hs, const void *key,
                     struct hlist_node *hnode, int noref)
{
        struct hlist_node *ehnode;
        cfs_hash_bd_t bds[2];
        int bits = 0;

        LASSERT(hlist_unhashed(hnode));

        cfs_hash_lock(hs, 0);
        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);

        cfs_hash_key_validate(hs, key, hnode);
        ehnode = cfs_hash_dual_bd_findadd_locked(hs, bds, key,
                                                 hnode, noref);
        cfs_hash_dual_bd_unlock(hs, bds, 1);

        if (ehnode == hnode) /* new item added */
                bits = cfs_hash_rehash_bits(hs);
        cfs_hash_unlock(hs, 0);
        if (bits > 0)
                cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));

        return ehnode;
}

/**
 * Add item @hnode to libcfs hash @hs using @key.  The registered
 * ops->hs_get function will be called if the item was added.
 * Returns 0 on success or -EALREADY on key collisions.
 */
int
cfs_hash_add_unique(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
{
        return cfs_hash_find_or_add(hs, key, hnode, 1) != hnode ?
               -EALREADY : 0;
}
EXPORT_SYMBOL(cfs_hash_add_unique);
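
/*
 * Typical usage (sketch; my_obj_free() and the member names are made
 * up): insert an object only when its key is not present yet.
 *
 *      if (cfs_hash_add_unique(hs, &obj->key, &obj->hnode) == -EALREADY)
 *              my_obj_free(obj);       (lost the race to another thread)
 */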

/**
 * Add item @hnode to libcfs hash @hs using @key.  If this @key
 * already exists in the hash then ops->hs_get will be called on the
 * conflicting entry and that entry will be returned to the caller.
 * Otherwise ops->hs_get is called on the item which was added.
 */
void *
cfs_hash_findadd_unique(cfs_hash_t *hs, const void *key,
                        struct hlist_node *hnode)
{
        hnode = cfs_hash_find_or_add(hs, key, hnode, 0);

        return cfs_hash_object(hs, hnode);
}
EXPORT_SYMBOL(cfs_hash_findadd_unique);

/**
 * Delete item @hnode from the libcfs hash @hs using @key.  The @key
 * is required to ensure the correct hash bucket is locked since there
 * is no direct linkage from the item to the bucket.  The object
 * removed from the hash will be returned and ops->hs_put is called
 * on the removed object.
 */
void *
cfs_hash_del(cfs_hash_t *hs, const void *key, struct hlist_node *hnode)
{
        void *obj = NULL;
        int bits = 0;
        cfs_hash_bd_t bds[2];

        cfs_hash_lock(hs, 0);
        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);

        /* NB: do nothing if @hnode is not in hash table */
        if (hnode == NULL || !hlist_unhashed(hnode)) {
                if (bds[1].bd_bucket == NULL && hnode != NULL) {
                        cfs_hash_bd_del_locked(hs, &bds[0], hnode);
                } else {
                        hnode = cfs_hash_dual_bd_finddel_locked(hs, bds,
                                                                key, hnode);
                }
        }

        if (hnode != NULL) {
                obj  = cfs_hash_object(hs, hnode);
                bits = cfs_hash_rehash_bits(hs);
        }

        cfs_hash_dual_bd_unlock(hs, bds, 1);
        cfs_hash_unlock(hs, 0);
        if (bits > 0)
                cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));

        return obj;
}
EXPORT_SYMBOL(cfs_hash_del);

/**
 * Delete the item with the given @key from libcfs hash @hs.  The first
 * @key found in the hash will be removed; if the key exists multiple
 * times in the hash @hs, this function must be called once per key.
 * The removed object will be returned and ops->hs_put is called on the
 * removed object.
 */
void *
cfs_hash_del_key(cfs_hash_t *hs, const void *key)
{
        return cfs_hash_del(hs, key, NULL);
}
EXPORT_SYMBOL(cfs_hash_del_key);

/**
 * Lookup an item using @key in the libcfs hash @hs and return it.
 * If the @key is found in the hash, ops->hs_get() is called and the
 * matching object is returned.  It is the caller's responsibility
 * to call the counterpart ops->hs_put using the cfs_hash_put() macro
 * when finished with the object.  If the @key was not found
 * in the hash @hs, NULL is returned.
 */
void *
cfs_hash_lookup(cfs_hash_t *hs, const void *key)
{
        void *obj = NULL;
        struct hlist_node *hnode;
        cfs_hash_bd_t bds[2];

        cfs_hash_lock(hs, 0);
        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);

        hnode = cfs_hash_dual_bd_lookup_locked(hs, bds, key);
        if (hnode != NULL)
                obj = cfs_hash_object(hs, hnode);

        cfs_hash_dual_bd_unlock(hs, bds, 0);
        cfs_hash_unlock(hs, 0);

        return obj;
}
EXPORT_SYMBOL(cfs_hash_lookup);
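
/*
 * Lookup/put pairing (sketch; the member names are illustrative): while
 * the reference taken by ops->hs_get is held, the object cannot go away.
 *
 *      obj = cfs_hash_lookup(hs, &key);
 *      if (obj != NULL) {
 *              ... use obj ...
 *              cfs_hash_put(hs, &obj->hnode);
 *      }
 */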

static void
cfs_hash_for_each_enter(cfs_hash_t *hs)
{
        LASSERT(!cfs_hash_is_exiting(hs));

        if (!cfs_hash_with_rehash(hs))
                return;
        /*
         * NB: there is a race on cfs_hash_t::hs_iterating, but it doesn't
         * matter because it's just an unreliable signal to the
         * rehash-thread; the rehash-thread will try to finish the rehash
         * ASAP when it sees this.
         */
        hs->hs_iterating = 1;

        cfs_hash_lock(hs, 1);
        hs->hs_iterators++;

        /* NB: iteration is mostly called by a service thread; we tend to
         * cancel a pending rehash-request instead of blocking the service
         * thread, and we relaunch the rehash request after iteration */
        if (cfs_hash_is_rehashing(hs))
                cfs_hash_rehash_cancel_locked(hs);
        cfs_hash_unlock(hs, 1);
}

static void
cfs_hash_for_each_exit(cfs_hash_t *hs)
{
        int remained;
        int bits;

        if (!cfs_hash_with_rehash(hs))
                return;
        cfs_hash_lock(hs, 1);
        remained = --hs->hs_iterators;
        bits = cfs_hash_rehash_bits(hs);
        cfs_hash_unlock(hs, 1);
        /* NB: it's a race on cfs_hash_t::hs_iterating, see above */
        if (remained == 0)
                hs->hs_iterating = 0;
        if (bits > 0) {
                cfs_hash_rehash(hs, atomic_read(&hs->hs_count) <
                                    CFS_HASH_LOOP_HOG);
        }
}

/**
 * For each item in the libcfs hash @hs call the passed callback @func
 * and pass to it as an argument each hash item and the private @data.
 *
 * a) this function may sleep (it drops the hash lock and reschedules
 *    between buckets)!
 * b) during the callback:
 *    . the bucket lock is held so the callback must never sleep.
 *    . if @remove_safe is true, the user can remove the current item via
 *      cfs_hash_bd_del_locked
 */
static __u64
cfs_hash_for_each_tight(cfs_hash_t *hs, cfs_hash_for_each_cb_t func,
                        void *data, int remove_safe)
{
        struct hlist_node *hnode;
        struct hlist_node *pos;
        cfs_hash_bd_t bd;
        __u64 count = 0;
        int excl = !!remove_safe;
        int loop = 0;
        int i;
        ENTRY;

        cfs_hash_for_each_enter(hs);

        cfs_hash_lock(hs, 0);
        LASSERT(!cfs_hash_is_rehashing(hs));

        cfs_hash_for_each_bucket(hs, &bd, i) {
                struct hlist_head *hhead;

                cfs_hash_bd_lock(hs, &bd, excl);
                if (func == NULL) { /* only glimpse size */
                        count += bd.bd_bucket->hsb_count;
                        cfs_hash_bd_unlock(hs, &bd, excl);
                        continue;
                }

                cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
                        hlist_for_each_safe(hnode, pos, hhead) {
                                cfs_hash_bucket_validate(hs, &bd, hnode);
                                count++;
                                loop++;
                                if (func(hs, &bd, hnode, data)) {
                                        cfs_hash_bd_unlock(hs, &bd, excl);
                                        goto out;
                                }
                        }
                }
                cfs_hash_bd_unlock(hs, &bd, excl);
                if (loop < CFS_HASH_LOOP_HOG)
                        continue;
                loop = 0;
                cfs_hash_unlock(hs, 0);
                cond_resched();
                cfs_hash_lock(hs, 0);
        }
 out:
        cfs_hash_unlock(hs, 0);

        cfs_hash_for_each_exit(hs);
        RETURN(count);
}

typedef struct {
        cfs_hash_cond_opt_cb_t  func;
        void                    *arg;
} cfs_hash_cond_arg_t;

static int
cfs_hash_cond_del_locked(cfs_hash_t *hs, cfs_hash_bd_t *bd,
                         struct hlist_node *hnode, void *data)
{
        cfs_hash_cond_arg_t *cond = data;

        if (cond->func(cfs_hash_object(hs, hnode), cond->arg))
                cfs_hash_bd_del_locked(hs, bd, hnode);
        return 0;
}

/**
 * Delete items from the libcfs hash @hs for which @func returns true.
 * The write lock is held while looping over each bucket, so that no
 * object can be referenced while it is being deleted.
 */
void
cfs_hash_cond_del(cfs_hash_t *hs, cfs_hash_cond_opt_cb_t func, void *data)
{
        cfs_hash_cond_arg_t arg = {
                .func   = func,
                .arg    = data,
        };

        cfs_hash_for_each_tight(hs, cfs_hash_cond_del_locked, &arg, 1);
}
EXPORT_SYMBOL(cfs_hash_cond_del);

void
cfs_hash_for_each(cfs_hash_t *hs,
                  cfs_hash_for_each_cb_t func, void *data)
{
        cfs_hash_for_each_tight(hs, func, data, 0);
}
EXPORT_SYMBOL(cfs_hash_for_each);

void
cfs_hash_for_each_safe(cfs_hash_t *hs,
                       cfs_hash_for_each_cb_t func, void *data)
{
        cfs_hash_for_each_tight(hs, func, data, 1);
}
EXPORT_SYMBOL(cfs_hash_for_each_safe);
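
/*
 * Callback shape shared by the iterators (sketch; my_count() is made
 * up):
 *
 *      static int my_count(cfs_hash_t *hs, cfs_hash_bd_t *bd,
 *                          struct hlist_node *hnode, void *data)
 *      {
 *              (*(int *)data)++;
 *              return 0;       (returning 1 stops the iteration)
 *      }
 *
 *      cfs_hash_for_each(hs, my_count, &n);
 */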

static int
cfs_hash_peek(cfs_hash_t *hs, cfs_hash_bd_t *bd,
              struct hlist_node *hnode, void *data)
{
        *(int *)data = 0;
        return 1; /* return 1 to break the loop */
}

int
cfs_hash_is_empty(cfs_hash_t *hs)
{
        int empty = 1;

        cfs_hash_for_each_tight(hs, cfs_hash_peek, &empty, 0);
        return empty;
}
EXPORT_SYMBOL(cfs_hash_is_empty);

__u64
cfs_hash_size_get(cfs_hash_t *hs)
{
        return cfs_hash_with_counter(hs) ?
               atomic_read(&hs->hs_count) :
               cfs_hash_for_each_tight(hs, NULL, NULL, 0);
}
EXPORT_SYMBOL(cfs_hash_size_get);
1571/*
1572 * cfs_hash_for_each_relax:
1573 * Iterate the hash table and call @func on each item without
1574 * holding any lock. This function cannot guarantee to finish the
1575 * iteration if these features are enabled:
1576 *
1577 *  a. if rehash_key is enabled, an item can be moved from
1578 *     one bucket to another bucket
1579 *  b. the user can remove a non-zero-ref item from the hash table,
1580 *     so the item can disappear from the table; even worse, the
1581 *     user may change the key and insert the item into another
1582 *     hash bucket.
1583 * There is no way to finish the iteration correctly in the two
1584 * cases above, so the iteration has to stop when a change is detected.
1585 */
1586static int
1587cfs_hash_for_each_relax(cfs_hash_t *hs, cfs_hash_for_each_cb_t func, void *data)
1588{
1589        struct hlist_node *hnode;
1590        struct hlist_node *tmp;
1591        cfs_hash_bd_t     bd;
1592        __u32        version;
1593        int            count = 0;
1594        int            stop_on_change;
1595        int            rc;
1596        int            i;
1597        ENTRY;
1598
1599        stop_on_change = cfs_hash_with_rehash_key(hs) ||
1600                         !cfs_hash_with_no_itemref(hs) ||
1601                         CFS_HOP(hs, put_locked) == NULL;
1602        cfs_hash_lock(hs, 0);
1603        LASSERT(!cfs_hash_is_rehashing(hs));
1604
1605        cfs_hash_for_each_bucket(hs, &bd, i) {
1606                struct hlist_head *hhead;
1607
1608                cfs_hash_bd_lock(hs, &bd, 0);
1609                version = cfs_hash_bd_version_get(&bd);
1610
1611                cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
1612                        for (hnode = hhead->first; hnode != NULL;) {
1613                                cfs_hash_bucket_validate(hs, &bd, hnode);
1614                                cfs_hash_get(hs, hnode);
1615                                cfs_hash_bd_unlock(hs, &bd, 0);
1616                                cfs_hash_unlock(hs, 0);
1617
1618                                rc = func(hs, &bd, hnode, data);
1619                                if (stop_on_change)
1620                                        cfs_hash_put(hs, hnode);
1621                                cond_resched();
1622                                count++;
1623
1624                                cfs_hash_lock(hs, 0);
1625                                cfs_hash_bd_lock(hs, &bd, 0);
1626                                if (!stop_on_change) {
1627                                        tmp = hnode->next;
1628                                        cfs_hash_put_locked(hs, hnode);
1629                                        hnode = tmp;
1630                                } else { /* bucket changed? */
1631                                        if (version !=
1632                                            cfs_hash_bd_version_get(&bd))
1633                                                break;
1634                                        /* safe to continue because no change */
1635                                        hnode = hnode->next;
1636                                }
1637                                if (rc) /* callback wants to break iteration */
1638                                        break;
1639                        }
1640                }
1641                cfs_hash_bd_unlock(hs, &bd, 0);
1642        }
1643        cfs_hash_unlock(hs, 0);
1644
1645        return count;
1646}
1647
1648int
1649cfs_hash_for_each_nolock(cfs_hash_t *hs,
1650                         cfs_hash_for_each_cb_t func, void *data)
1651{
1652        ENTRY;
1653
1654        if (cfs_hash_with_no_lock(hs) ||
1655            cfs_hash_with_rehash_key(hs) ||
1656            !cfs_hash_with_no_itemref(hs))
1657                RETURN(-EOPNOTSUPP);
1658
1659        if (CFS_HOP(hs, get) == NULL ||
1660            (CFS_HOP(hs, put) == NULL &&
1661             CFS_HOP(hs, put_locked) == NULL))
1662                RETURN(-EOPNOTSUPP);
1663
1664        cfs_hash_for_each_enter(hs);
1665        cfs_hash_for_each_relax(hs, func, data);
1666        cfs_hash_for_each_exit(hs);
1667
1668        RETURN(0);
1669}
1670EXPORT_SYMBOL(cfs_hash_for_each_nolock);
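
/*
 * Editor's illustrative sketch, not part of the original file: because
 * cfs_hash_for_each_nolock() drops all locks around the callback, the
 * callback is allowed to block, but the hash must provide get and
 * put/put_locked operations or -EOPNOTSUPP is returned ("sync_cb" is
 * an invented callback name):
 *
 *	rc = cfs_hash_for_each_nolock(hs, sync_cb, NULL);
 *	if (rc == -EOPNOTSUPP)
 *		CERROR("hash does not support lockless iteration\n");
 */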
1671
1672/**
1673 * For each hash bucket in the libcfs hash @hs call the passed callback
1674 * @func until all the hash buckets are empty.  The passed callback @func
1675 * or the previously registered callback hs->hs_put must remove the item
1676 * from the hash.  You may use either the cfs_hash_del() or hlist_del()
1677 * functions.  No rwlocks will be held during the callback @func, so it
1678 * is safe to sleep if needed.  This function will not terminate until
1679 * the hash is empty.  Note it is still possible to concurrently add new
1680 * items into the hash.  It is the caller's responsibility to ensure
1681 * the required locking is in place to prevent concurrent insertions.
1682 */
1683int
1684cfs_hash_for_each_empty(cfs_hash_t *hs,
1685                        cfs_hash_for_each_cb_t func, void *data)
1686{
1687        unsigned  i = 0;
1688        ENTRY;
1689
1690        if (cfs_hash_with_no_lock(hs))
1691                return -EOPNOTSUPP;
1692
1693        if (CFS_HOP(hs, get) == NULL ||
1694            (CFS_HOP(hs, put) == NULL &&
1695             CFS_HOP(hs, put_locked) == NULL))
1696                return -EOPNOTSUPP;
1697
1698        cfs_hash_for_each_enter(hs);
1699        while (cfs_hash_for_each_relax(hs, func, data)) {
1700                CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n",
1701                       hs->hs_name, i++);
1702        }
1703        cfs_hash_for_each_exit(hs);
1704        RETURN(0);
1705}
1706EXPORT_SYMBOL(cfs_hash_for_each_empty);
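
/*
 * Editor's illustrative sketch, not part of the original file: a
 * typical teardown drain with cfs_hash_for_each_empty().  "drain_cb",
 * "struct my_obj", "mo_key" and "my_obj_free" are invented names; the
 * callback must unlink each item (here via cfs_hash_del(), as the
 * comment above suggests) or the loop will never terminate:
 *
 *	static int drain_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
 *			    struct hlist_node *hnode, void *data)
 *	{
 *		struct my_obj *mo = cfs_hash_object(hs, hnode);
 *
 *		cfs_hash_del(hs, &mo->mo_key, hnode);
 *		my_obj_free(mo);
 *		return 0;
 *	}
 *
 *	cfs_hash_for_each_empty(hs, drain_cb, NULL);
 */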
1707
1708void
1709cfs_hash_hlist_for_each(cfs_hash_t *hs, unsigned hindex,
1710                        cfs_hash_for_each_cb_t func, void *data)
1711{
1712        struct hlist_head   *hhead;
1713        struct hlist_node   *hnode;
1714        cfs_hash_bd_t       bd;
1715
1716        cfs_hash_for_each_enter(hs);
1717        cfs_hash_lock(hs, 0);
1718        if (hindex >= CFS_HASH_NHLIST(hs))
1719                goto out;
1720
1721        cfs_hash_bd_index_set(hs, hindex, &bd);
1722
1723        cfs_hash_bd_lock(hs, &bd, 0);
1724        hhead = cfs_hash_bd_hhead(hs, &bd);
1725        hlist_for_each(hnode, hhead) {
1726                if (func(hs, &bd, hnode, data))
1727                        break;
1728        }
1729        cfs_hash_bd_unlock(hs, &bd, 0);
1730 out:
1731        cfs_hash_unlock(hs, 0);
1732        cfs_hash_for_each_exit(hs);
1733}
1734
1735EXPORT_SYMBOL(cfs_hash_hlist_for_each);
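
/*
 * Editor's illustrative sketch, not part of the original file:
 * cfs_hash_hlist_for_each() walks a single hash list, e.g. to sample
 * one chain from debugging code; "print_cb" is an invented callback
 * name and the index must be below CFS_HASH_NHLIST(hs):
 *
 *	cfs_hash_hlist_for_each(hs, 0, print_cb, NULL);
 */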
1736
1737/**
1738 * For each item in the libcfs hash @hs which matches @key, call the
1739 * passed callback @func, passing it each matching hash item and the
1740 * private @data as arguments. The bucket lock is held during the
1741 * callback, so the callback must never sleep.
1742 */
1743void
1744cfs_hash_for_each_key(cfs_hash_t *hs, const void *key,
1745                      cfs_hash_for_each_cb_t func, void *data)
1746{
1747        struct hlist_node   *hnode;
1748        cfs_hash_bd_t       bds[2];
1749        unsigned            i;
1750
1751        cfs_hash_lock(hs, 0);
1752
1753        cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
1754
1755        cfs_hash_for_each_bd(bds, 2, i) {
1756                struct hlist_head *hlist = cfs_hash_bd_hhead(hs, &bds[i]);
1757
1758                hlist_for_each(hnode, hlist) {
1759                        cfs_hash_bucket_validate(hs, &bds[i], hnode);
1760
1761                        if (cfs_hash_keycmp(hs, key, hnode)) {
1762                                if (func(hs, &bds[i], hnode, data))
1763                                        break;
1764                        }
1765                }
1766        }
1767
1768        cfs_hash_dual_bd_unlock(hs, bds, 0);
1769        cfs_hash_unlock(hs, 0);
1770}
1771EXPORT_SYMBOL(cfs_hash_for_each_key);
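
/*
 * Editor's illustrative sketch, not part of the original file: a
 * first-match lookup callback for cfs_hash_for_each_key().  The bucket
 * lock is held here, so the callback must not sleep; "first_match_cb"
 * is an invented name and returning 1 stops after the first match:
 *
 *	static int first_match_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
 *				  struct hlist_node *hnode, void *data)
 *	{
 *		*(void **)data = cfs_hash_object(hs, hnode);
 *		return 1;
 *	}
 *
 *	void *obj = NULL;
 *
 *	cfs_hash_for_each_key(hs, key, first_match_cb, &obj);
 */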
1772
1773/**
1774 * Rehash the libcfs hash @hs to the given @bits.  This can be used
1775 * to grow the hash size when excessive chaining is detected, or to
1776 * shrink the hash when it is larger than needed.  When the CFS_HASH_REHASH
1777 * flag is set in @hs the libcfs hash may be dynamically rehashed
1778 * during addition or removal if the hash's theta value falls outside
1779 * the range [hs->hs_min_theta, hs->hs_max_theta].  By default
1780 * these values are tuned to keep the chained hash depth small, and
1781 * this approach assumes a reasonably uniform hashing function.  The
1782 * theta thresholds for @hs are tunable via cfs_hash_set_theta().
1783 */
1784void
1785cfs_hash_rehash_cancel_locked(cfs_hash_t *hs)
1786{
1787        int     i;
1788
1789        /* the caller must hold cfs_hash_lock(hs, 1) */
1790        LASSERT(cfs_hash_with_rehash(hs) &&
1791                !cfs_hash_with_no_lock(hs));
1792
1793        if (!cfs_hash_is_rehashing(hs))
1794                return;
1795
1796        if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_rehash_wi)) {
1797                hs->hs_rehash_bits = 0;
1798                return;
1799        }
1800
1801        for (i = 2; cfs_hash_is_rehashing(hs); i++) {
1802                cfs_hash_unlock(hs, 1);
1803                /* raise a console warning if we wait too long */
1804                CDEBUG(IS_PO2(i >> 3) ? D_WARNING : D_INFO,
1805                       "hash %s is still rehashing, rescheduled %d\n",
1806                       hs->hs_name, i - 1);
1807                cond_resched();
1808                cfs_hash_lock(hs, 1);
1809        }
1810}
1811EXPORT_SYMBOL(cfs_hash_rehash_cancel_locked);
1812
1813void
1814cfs_hash_rehash_cancel(cfs_hash_t *hs)
1815{
1816        cfs_hash_lock(hs, 1);
1817        cfs_hash_rehash_cancel_locked(hs);
1818        cfs_hash_unlock(hs, 1);
1819}
1820EXPORT_SYMBOL(cfs_hash_rehash_cancel);
1821
1822int
1823cfs_hash_rehash(cfs_hash_t *hs, int do_rehash)
1824{
1825        int     rc;
1826
1827        LASSERT(cfs_hash_with_rehash(hs) && !cfs_hash_with_no_lock(hs));
1828
1829        cfs_hash_lock(hs, 1);
1830
1831        rc = cfs_hash_rehash_bits(hs);
1832        if (rc <= 0) {
1833                cfs_hash_unlock(hs, 1);
1834                return rc;
1835        }
1836
1837        hs->hs_rehash_bits = rc;
1838        if (!do_rehash) {
1839                /* launch and return */
1840                cfs_wi_schedule(cfs_sched_rehash, &hs->hs_rehash_wi);
1841                cfs_hash_unlock(hs, 1);
1842                return 0;
1843        }
1844
1845        /* rehash right now */
1846        cfs_hash_unlock(hs, 1);
1847
1848        return cfs_hash_rehash_worker(&hs->hs_rehash_wi);
1849}
1850EXPORT_SYMBOL(cfs_hash_rehash);
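
/*
 * Editor's illustrative sketch, not part of the original file: the
 * @do_rehash argument selects synchronous versus queued rehashing:
 *
 *	rc = cfs_hash_rehash(hs, 1);	synchronous, in the caller's context
 *	rc = cfs_hash_rehash(hs, 0);	queue hs_rehash_wi and return 0
 *
 * In both cases a non-positive value from cfs_hash_rehash_bits() is
 * returned directly, meaning no resize is currently needed.
 */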
1851
1852static int
1853cfs_hash_rehash_bd(cfs_hash_t *hs, cfs_hash_bd_t *old)
1854{
1855        cfs_hash_bd_t      new;
1856        struct hlist_head  *hhead;
1857        struct hlist_node  *hnode;
1858        struct hlist_node  *pos;
1859        void          *key;
1860        int             c = 0;
1861
1862        /* we hold cfs_hash_lock(hs, 1), so no bucket lock is needed */
1863        cfs_hash_bd_for_each_hlist(hs, old, hhead) {
1864                hlist_for_each_safe(hnode, pos, hhead) {
1865                        key = cfs_hash_key(hs, hnode);
1866                        LASSERT(key != NULL);
1867                        /* Validate hnode is in the correct bucket. */
1868                        cfs_hash_bucket_validate(hs, old, hnode);
1869                        /*
1870                         * Delete from old hash bucket; move to new bucket.
1871                         * ops->hs_key must be defined.
1872                         */
1873                        cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
1874                                             hs->hs_rehash_bits, key, &new);
1875                        cfs_hash_bd_move_locked(hs, old, &new, hnode);
1876                        c++;
1877                }
1878        }
1879
1880        return c;
1881}
1882
1883static int
1884cfs_hash_rehash_worker(cfs_workitem_t *wi)
1885{
1886        cfs_hash_t       *hs = container_of(wi, cfs_hash_t, hs_rehash_wi);
1887        cfs_hash_bucket_t **bkts;
1888        cfs_hash_bd_t       bd;
1889        unsigned int    old_size;
1890        unsigned int    new_size;
1891        int              bsize;
1892        int              count = 0;
1893        int              rc = 0;
1894        int              i;
1895
1896        LASSERT(hs != NULL && cfs_hash_with_rehash(hs));
1897
1898        cfs_hash_lock(hs, 0);
1899        LASSERT(cfs_hash_is_rehashing(hs));
1900
1901        old_size = CFS_HASH_NBKT(hs);
1902        new_size = CFS_HASH_RH_NBKT(hs);
1903
1904        cfs_hash_unlock(hs, 0);
1905
1906        /*
1907         * don't need hs::hs_rwlock for hs::hs_buckets,
1908         * because nobody can change bkt-table except me.
1909         */
1910        bkts = cfs_hash_buckets_realloc(hs, hs->hs_buckets,
1911                                        old_size, new_size);
1912        cfs_hash_lock(hs, 1);
1913        if (bkts == NULL) {
1914                rc = -ENOMEM;
1915                goto out;
1916        }
1917
1918        if (bkts == hs->hs_buckets) {
1919                bkts = NULL; /* do nothing */
1920                goto out;
1921        }
1922
1923        rc = __cfs_hash_theta(hs);
1924        if ((rc >= hs->hs_min_theta) && (rc <= hs->hs_max_theta)) {
1925                /* free the new allocated bkt-table */
1926                old_size = new_size;
1927                new_size = CFS_HASH_NBKT(hs);
1928                rc = -EALREADY;
1929                goto out;
1930        }
1931
1932        LASSERT(hs->hs_rehash_buckets == NULL);
1933        hs->hs_rehash_buckets = bkts;
1934
1935        rc = 0;
1936        cfs_hash_for_each_bucket(hs, &bd, i) {
1937                if (cfs_hash_is_exiting(hs)) {
1938                        rc = -ESRCH;
1939                        /* someone wants to destroy the hash, abort now */
1940                        if (old_size < new_size) /* OK to free old bkt-table */
1941                                break;
1942                        /* it's shrinking; need to free the new bkt-table */
1943                        hs->hs_rehash_buckets = NULL;
1944                        old_size = new_size;
1945                        new_size = CFS_HASH_NBKT(hs);
1946                        goto out;
1947                }
1948
1949                count += cfs_hash_rehash_bd(hs, &bd);
1950                if (count < CFS_HASH_LOOP_HOG ||
1951                    cfs_hash_is_iterating(hs)) { /* need to finish ASAP */
1952                        continue;
1953                }
1954
1955                count = 0;
1956                cfs_hash_unlock(hs, 1);
1957                cond_resched();
1958                cfs_hash_lock(hs, 1);
1959        }
1960
1961        hs->hs_rehash_count++;
1962
1963        bkts = hs->hs_buckets;
1964        hs->hs_buckets = hs->hs_rehash_buckets;
1965        hs->hs_rehash_buckets = NULL;
1966
1967        hs->hs_cur_bits = hs->hs_rehash_bits;
1968 out:
1969        hs->hs_rehash_bits = 0;
1970        if (rc == -ESRCH) /* will never be scheduled again */
1971                cfs_wi_exit(cfs_sched_rehash, wi);
1972        bsize = cfs_hash_bkt_size(hs);
1973        cfs_hash_unlock(hs, 1);
1974        /* can't refer to @hs anymore because it could be destroyed */
1975        if (bkts != NULL)
1976                cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
1977        if (rc != 0)
1978                CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
1979        /* return 1 only if cfs_wi_exit is called */
1980        return rc == -ESRCH;
1981}
1982
1983/**
1984 * Rehash the object referenced by @hnode in the libcfs hash @hs.  The
1985 * @old_key must be provided to locate the object's previous location
1986 * in the hash, and the @new_key will be used to reinsert the object.
1987 * Use this function instead of a cfs_hash_add() + cfs_hash_del()
1988 * combo when it is critical that there is no window in time where the
1989 * object is missing from the hash.  When an object is being rehashed
1990 * the registered cfs_hash_get() and cfs_hash_put() functions will
1991 * not be called.
1992 */
1993void cfs_hash_rehash_key(cfs_hash_t *hs, const void *old_key,
1994                         void *new_key, struct hlist_node *hnode)
1995{
1996        cfs_hash_bd_t   bds[3];
1997        cfs_hash_bd_t   old_bds[2];
1998        cfs_hash_bd_t   new_bd;
1999
2000        LASSERT(!hlist_unhashed(hnode));
2001
2002        cfs_hash_lock(hs, 0);
2003
2004        cfs_hash_dual_bd_get(hs, old_key, old_bds);
2005        cfs_hash_bd_get(hs, new_key, &new_bd);
2006
2007        bds[0] = old_bds[0];
2008        bds[1] = old_bds[1];
2009        bds[2] = new_bd;
2010
2011        /* NB: bds[0] and bds[1] are ordered already */
2012        cfs_hash_bd_order(&bds[1], &bds[2]);
2013        cfs_hash_bd_order(&bds[0], &bds[1]);
2014
2015        cfs_hash_multi_bd_lock(hs, bds, 3, 1);
2016        if (likely(old_bds[1].bd_bucket == NULL)) {
2017                cfs_hash_bd_move_locked(hs, &old_bds[0], &new_bd, hnode);
2018        } else {
2019                cfs_hash_dual_bd_finddel_locked(hs, old_bds, old_key, hnode);
2020                cfs_hash_bd_add_locked(hs, &new_bd, hnode);
2021        }
2022        /* overwrite the key inside the locks, otherwise this may race
2023         * with other operations, e.g. rehash */
2024        cfs_hash_keycpy(hs, new_key, hnode);
2025
2026        cfs_hash_multi_bd_unlock(hs, bds, 3, 1);
2027        cfs_hash_unlock(hs, 0);
2028}
2029EXPORT_SYMBOL(cfs_hash_rehash_key);
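
/*
 * Editor's illustrative sketch, not part of the original file:
 * atomically re-keying a live object.  "struct my_obj", "mo_id",
 * "mo_hnode" and "my_obj_new_id" are invented names; the hash copies
 * @new_key into the object via the registered keycpy operation while
 * all affected bucket locks are held:
 *
 *	__u64 new_id = my_obj_new_id(mo);  (mo is already in the hash)
 *
 *	cfs_hash_rehash_key(hs, &mo->mo_id, &new_id, &mo->mo_hnode);
 */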
2030
2031int cfs_hash_debug_header(struct seq_file *m)
2032{
2033        return seq_printf(m, "%-*s%6s%6s%6s%6s%6s%6s%6s%7s%8s%8s%8s%s\n",
2034                 CFS_HASH_BIGNAME_LEN,
2035                 "name", "cur", "min", "max", "theta", "t-min", "t-max",
2036                 "flags", "rehash", "count", "maxdep", "maxdepb",
2037                 " distribution");
2038}
2039EXPORT_SYMBOL(cfs_hash_debug_header);
2040
2041static cfs_hash_bucket_t **
2042cfs_hash_full_bkts(cfs_hash_t *hs)
2043{
2044        /* NB: caller should hold hs->hs_rwlock if REHASH is set */
2045        if (hs->hs_rehash_buckets == NULL)
2046                return hs->hs_buckets;
2047
2048        LASSERT(hs->hs_rehash_bits != 0);
2049        return hs->hs_rehash_bits > hs->hs_cur_bits ?
2050               hs->hs_rehash_buckets : hs->hs_buckets;
2051}
2052
2053static unsigned int
2054cfs_hash_full_nbkt(cfs_hash_t *hs)
2055{
2056        /* NB: caller should hold hs->hs_rwlock if REHASH is set */
2057        if (hs->hs_rehash_buckets == NULL)
2058                return CFS_HASH_NBKT(hs);
2059
2060        LASSERT(hs->hs_rehash_bits != 0);
2061        return hs->hs_rehash_bits > hs->hs_cur_bits ?
2062               CFS_HASH_RH_NBKT(hs) : CFS_HASH_NBKT(hs);
2063}
2064
2065int cfs_hash_debug_str(cfs_hash_t *hs, struct seq_file *m)
2066{
2067        int                 dist[8] = { 0, };
2068        int                 maxdep  = -1;
2069        int                 maxdepb = -1;
2070        int                 total   = 0;
2071        int                 theta;
2072        int                 i;
2073
2074        cfs_hash_lock(hs, 0);
2075        theta = __cfs_hash_theta(hs);
2076
2077        seq_printf(m, "%-*s %5d %5d %5d %d.%03d %d.%03d %d.%03d  0x%02x %6d ",
2078                      CFS_HASH_BIGNAME_LEN, hs->hs_name,
2079                      1 << hs->hs_cur_bits, 1 << hs->hs_min_bits,
2080                      1 << hs->hs_max_bits,
2081                      __cfs_hash_theta_int(theta), __cfs_hash_theta_frac(theta),
2082                      __cfs_hash_theta_int(hs->hs_min_theta),
2083                      __cfs_hash_theta_frac(hs->hs_min_theta),
2084                      __cfs_hash_theta_int(hs->hs_max_theta),
2085                      __cfs_hash_theta_frac(hs->hs_max_theta),
2086                      hs->hs_flags, hs->hs_rehash_count);
2087
2088        /*
2089         * The distribution is a summary of the chained hash depth in
2090         * each of the libcfs hash buckets.  Each bucket's hsb_count is
2091         * divided by the hash theta value and used to generate a
2092         * histogram of the hash distribution.  A uniform hash will
2093         * result in all hash buckets being close to the average, thus
2094         * only the first few entries in the histogram will be non-zero.
2095         * If your hash function results in a non-uniform hash, this will
2096         * be observable as outlier buckets in the distribution histogram.
2097         *
2098         * Uniform hash distribution:      128/128/0/0/0/0/0/0
2099         * Non-Uniform hash distribution:  128/125/0/0/0/0/2/1
2100         */
2101        for (i = 0; i < cfs_hash_full_nbkt(hs); i++) {
2102                cfs_hash_bd_t  bd;
2103
2104                bd.bd_bucket = cfs_hash_full_bkts(hs)[i];
2105                cfs_hash_bd_lock(hs, &bd, 0);
2106                if (maxdep < bd.bd_bucket->hsb_depmax) {
2107                        maxdep  = bd.bd_bucket->hsb_depmax;
2108                        maxdepb = ffz(~maxdep);
2109                }
2110                total += bd.bd_bucket->hsb_count;
2111                dist[min(__cfs_fls(bd.bd_bucket->hsb_count / max(theta, 1)), 7)]++;
2112                cfs_hash_bd_unlock(hs, &bd, 0);
2113        }
2114
2115        seq_printf(m, "%7d %7d %7d ", total, maxdep, maxdepb);
2116        for (i = 0; i < 8; i++)
2117                seq_printf(m, "%d%c",  dist[i], (i == 7) ? '\n' : '/');
2118
2119        cfs_hash_unlock(hs, 0);
2120
2121        return 0;
2122}
2123EXPORT_SYMBOL(cfs_hash_debug_str);
2124