linux/lib/sbitmap.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 */

#include <linux/sched.h>
#include <linux/random.h>
#include <linux/sbitmap.h>
#include <linux/seq_file.h>

/*
 * See if we have deferred clears that we can batch move
 */
static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index)
{
	unsigned long mask, val;
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&sb->map[index].swap_lock, flags);

	if (!sb->map[index].cleared)
		goto out_unlock;

	/*
	 * First get a stable cleared mask, setting the old mask to 0.
	 */
	mask = xchg(&sb->map[index].cleared, 0);

	/*
	 * Now clear the masked bits in our free word
	 */
	do {
		val = sb->map[index].word;
	} while (cmpxchg(&sb->map[index].word, val, val & ~mask) != val);

	ret = true;
out_unlock:
	spin_unlock_irqrestore(&sb->map[index].swap_lock, flags);
	return ret;
}

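/*
 * Sizing note (a worked example, not part of the original source): with
 * depth = 128 and shift < 0 on a 64-bit machine, the code below starts at
 * shift = ilog2(64) = 6 and drops it to 5 so the map spans at least four
 * words: bits_per_word = 32 and map_nr = DIV_ROUND_UP(128, 32) = 4. A
 * hypothetical caller wanting such a map might do:
 *
 *	struct sbitmap sb;
 *
 *	if (sbitmap_init_node(&sb, 128, -1, GFP_KERNEL, NUMA_NO_NODE))
 *		return -ENOMEM;
 */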
int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node)
{
	unsigned int bits_per_word;
	unsigned int i;

	if (shift < 0) {
		shift = ilog2(BITS_PER_LONG);
		/*
		 * If the bitmap is small, shrink the number of bits per word so
		 * we spread over a few cachelines, at least. If less than 4
		 * bits, just forget about it, it's not going to work optimally
		 * anyway.
		 */
		if (depth >= 4) {
			while ((4U << shift) > depth)
				shift--;
		}
	}
	bits_per_word = 1U << shift;
	if (bits_per_word > BITS_PER_LONG)
		return -EINVAL;

	sb->shift = shift;
	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	if (depth == 0) {
		sb->map = NULL;
		return 0;
	}

	sb->map = kcalloc_node(sb->map_nr, sizeof(*sb->map), flags, node);
	if (!sb->map)
		return -ENOMEM;

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
		spin_lock_init(&sb->map[i].swap_lock);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);

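/*
 * Resize the map to the new depth. Note that this does not reallocate
 * sb->map; per the API documentation in include/linux/sbitmap.h, the caller
 * must ensure the new depth does not exceed the depth the bitmap was
 * initialized with.
 */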
void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
{
	unsigned int bits_per_word = 1U << sb->shift;
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++)
		sbitmap_deferred_clear(sb, i);

	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_resize);

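/*
 * Find and set a free bit in a single word. 'hint' is the bit to start
 * searching from; when 'wrap' is true and the search began at a non-zero
 * hint, it restarts once from bit 0 before giving up. Returns the bit
 * number on success, or -1 if the word is exhausted.
 */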
static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
			      unsigned int hint, bool wrap)
{
	unsigned int orig_hint = hint;
	int nr;

	while (1) {
		nr = find_next_zero_bit(word, depth, hint);
		if (unlikely(nr >= depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * exhaust the map.
			 */
			if (orig_hint && hint && wrap) {
				hint = orig_hint = 0;
				continue;
			}
			return -1;
		}

		if (!test_and_set_bit_lock(nr, word))
			break;

		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
	}

	return nr;
}

static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
				     unsigned int alloc_hint, bool round_robin)
{
	int nr;

	do {
		nr = __sbitmap_get_word(&sb->map[index].word,
					sb->map[index].depth, alloc_hint,
					!round_robin);
		if (nr != -1)
			break;
		if (!sbitmap_deferred_clear(sb, index))
			break;
	} while (1);

	return nr;
}

int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	/*
	 * Unless we're doing round robin tag allocation, just use the
	 * alloc_hint to find the right word index. No point in looping
	 * twice in find_next_zero_bit() for that case.
	 */
	if (round_robin)
		alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
	else
		alloc_hint = 0;

	for (i = 0; i < sb->map_nr; i++) {
		nr = sbitmap_find_bit_in_index(sb, index, alloc_hint,
						round_robin);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		/* Jump to next index. */
		alloc_hint = 0;
		if (++index >= sb->map_nr)
			index = 0;
	}

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);

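/*
 * Like sbitmap_get(), but "shallow": only the first
 * min(word depth, shallow_depth) bits of each word are candidates, which
 * bounds how many bits can be allocated out of any one word.
 */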
int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
			unsigned long shallow_depth)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	for (i = 0; i < sb->map_nr; i++) {
again:
		nr = __sbitmap_get_word(&sb->map[index].word,
					min(sb->map[index].depth, shallow_depth),
					SB_NR_TO_BIT(sb, alloc_hint), true);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		if (sbitmap_deferred_clear(sb, index))
			goto again;

		/* Jump to next index. */
		index++;
		alloc_hint = index << sb->shift;

		if (index >= sb->map_nr) {
			index = 0;
			alloc_hint = 0;
		}
	}

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get_shallow);

bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		if (sb->map[i].word & ~sb->map[i].cleared)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);

bool sbitmap_any_bit_clear(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];
		unsigned long mask = word->word & ~word->cleared;
		unsigned long ret;

		ret = find_first_zero_bit(&mask, word->depth);
		if (ret < word->depth)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_clear);

static unsigned int __sbitmap_weight(const struct sbitmap *sb, bool set)
{
	unsigned int i, weight = 0;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];

		if (set)
			weight += bitmap_weight(&word->word, word->depth);
		else
			weight += bitmap_weight(&word->cleared, word->depth);
	}
	return weight;
}

static unsigned int sbitmap_weight(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, true);
}

static unsigned int sbitmap_cleared(const struct sbitmap *sb)
{
	return __sbitmap_weight(sb, false);
}

void sbitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	seq_printf(m, "depth=%u\n", sb->depth);
	seq_printf(m, "busy=%u\n", sbitmap_weight(sb) - sbitmap_cleared(sb));
	seq_printf(m, "cleared=%u\n", sbitmap_cleared(sb));
	seq_printf(m, "bits_per_word=%u\n", 1U << sb->shift);
	seq_printf(m, "map_nr=%u\n", sb->map_nr);
}
EXPORT_SYMBOL_GPL(sbitmap_show);

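/*
 * Helper for sbitmap_bitmap_show(): emit one byte of a classic hexdump,
 * starting a new "%08x:" offset line every 16 bytes and grouping the hex
 * digits into two-byte pairs.
 */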
static inline void emit_byte(struct seq_file *m, unsigned int offset, u8 byte)
{
	if ((offset & 0xf) == 0) {
		if (offset != 0)
			seq_putc(m, '\n');
		seq_printf(m, "%08x:", offset);
	}
	if ((offset & 0x1) == 0)
		seq_putc(m, ' ');
	seq_printf(m, "%02x", byte);
}

void sbitmap_bitmap_show(struct sbitmap *sb, struct seq_file *m)
{
	u8 byte = 0;
	unsigned int byte_bits = 0;
	unsigned int offset = 0;
	int i;

	for (i = 0; i < sb->map_nr; i++) {
		unsigned long word = READ_ONCE(sb->map[i].word);
		unsigned int word_bits = READ_ONCE(sb->map[i].depth);

		while (word_bits > 0) {
			unsigned int bits = min(8 - byte_bits, word_bits);

			byte |= (word & (BIT(bits) - 1)) << byte_bits;
			byte_bits += bits;
			if (byte_bits == 8) {
				emit_byte(m, offset, byte);
				byte = 0;
				byte_bits = 0;
				offset++;
			}
			word >>= bits;
			word_bits -= bits;
		}
	}
	if (byte_bits) {
		emit_byte(m, offset, byte);
		offset++;
	}
	if (offset)
		seq_putc(m, '\n');
}
EXPORT_SYMBOL_GPL(sbitmap_bitmap_show);

static unsigned int sbq_calc_wake_batch(struct sbitmap_queue *sbq,
					unsigned int depth)
{
	unsigned int wake_batch;
	unsigned int shallow_depth;

	/*
	 * For each batch, we wake up one queue. We need to make sure that our
	 * batch size is small enough that the full depth of the bitmap,
	 * potentially limited by a shallow depth, is enough to wake up all of
	 * the queues.
	 *
	 * Each full word of the bitmap has bits_per_word bits, and there might
	 * be a partial word. There are depth / bits_per_word full words and
	 * depth % bits_per_word bits left over. In bitwise arithmetic:
	 *
	 * bits_per_word = 1 << shift
	 * depth / bits_per_word = depth >> shift
	 * depth % bits_per_word = depth & ((1 << shift) - 1)
	 *
	 * Each word can be limited to sbq->min_shallow_depth bits.
	 */
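	/*
	 * Worked example (illustrative; assumes SBQ_WAIT_QUEUES == 8 and
	 * SBQ_WAKE_BATCH == 8 from include/linux/sbitmap.h): depth = 256,
	 * shift = 6 and min_shallow_depth = UINT_MAX give shallow_depth = 64,
	 * leave depth at 256, and yield
	 * wake_batch = clamp(256 / 8, 1, 8) = 8.
	 */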
	shallow_depth = min(1U << sbq->sb.shift, sbq->min_shallow_depth);
	depth = ((depth >> sbq->sb.shift) * shallow_depth +
		 min(depth & ((1U << sbq->sb.shift) - 1), shallow_depth));
	wake_batch = clamp_t(unsigned int, depth / SBQ_WAIT_QUEUES, 1,
			     SBQ_WAKE_BATCH);

	return wake_batch;
}

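/*
 * Illustrative pairing of the queue API (a sketch, not an in-tree caller):
 *
 *	struct sbitmap_queue sbq;
 *	int nr;
 *
 *	if (sbitmap_queue_init_node(&sbq, 64, -1, false, GFP_KERNEL,
 *				    NUMA_NO_NODE))
 *		return -ENOMEM;
 *	nr = __sbitmap_queue_get(&sbq);
 *	if (nr >= 0)
 *		sbitmap_queue_clear(&sbq, nr, raw_smp_processor_id());
 */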
int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, bool round_robin, gfp_t flags, int node)
{
	int ret;
	int i;

	ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node);
	if (ret)
		return ret;

	sbq->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
	if (!sbq->alloc_hint) {
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	if (depth && !round_robin) {
		for_each_possible_cpu(i)
			*per_cpu_ptr(sbq->alloc_hint, i) = prandom_u32() % depth;
	}

	sbq->min_shallow_depth = UINT_MAX;
	sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
	atomic_set(&sbq->wake_index, 0);
	atomic_set(&sbq->ws_active, 0);

	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
	if (!sbq->ws) {
		free_percpu(sbq->alloc_hint);
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		init_waitqueue_head(&sbq->ws[i].wait);
		atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
	}

	sbq->round_robin = round_robin;
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);

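/*
 * Recompute the wake batch for a new depth. If it changed, reset every
 * wait count to 1 so the next clear triggers a wakeup under the new batch
 * size rather than waiting out the old count.
 */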
static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
					    unsigned int depth)
{
	unsigned int wake_batch = sbq_calc_wake_batch(sbq, depth);
	int i;

	if (sbq->wake_batch != wake_batch) {
		WRITE_ONCE(sbq->wake_batch, wake_batch);
		/*
		 * Pairs with the memory barrier in sbitmap_queue_wake_up()
		 * to ensure that the batch size is updated before the wait
		 * counts.
		 */
		smp_mb();
		for (i = 0; i < SBQ_WAIT_QUEUES; i++)
			atomic_set(&sbq->ws[i].wait_cnt, 1);
	}
}

void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
	sbitmap_queue_update_wake_batch(sbq, depth);
	sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);

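/*
 * Allocate a bit using (and then refreshing) this CPU's cached alloc_hint.
 * Returns the allocated bit number, or -1 if the map is full.
 */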
int __sbitmap_queue_get(struct sbitmap_queue *sbq)
{
	unsigned int hint, depth;
	int nr;

	hint = this_cpu_read(*sbq->alloc_hint);
	depth = READ_ONCE(sbq->sb.depth);
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}
	nr = sbitmap_get(&sbq->sb, hint, sbq->round_robin);

	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sbq->alloc_hint, 0);
	} else if (nr == hint || unlikely(sbq->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}

	return nr;
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get);

int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
				unsigned int shallow_depth)
{
	unsigned int hint, depth;
	int nr;

	WARN_ON_ONCE(shallow_depth < sbq->min_shallow_depth);

	hint = this_cpu_read(*sbq->alloc_hint);
	depth = READ_ONCE(sbq->sb.depth);
	if (unlikely(hint >= depth)) {
		hint = depth ? prandom_u32() % depth : 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}
	nr = sbitmap_get_shallow(&sbq->sb, hint, shallow_depth);

	if (nr == -1) {
		/* If the map is full, a hint won't do us much good. */
		this_cpu_write(*sbq->alloc_hint, 0);
	} else if (nr == hint || unlikely(sbq->round_robin)) {
		/* Only update the hint if we used it. */
		hint = nr + 1;
		if (hint >= depth - 1)
			hint = 0;
		this_cpu_write(*sbq->alloc_hint, hint);
	}

	return nr;
}
EXPORT_SYMBOL_GPL(__sbitmap_queue_get_shallow);

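/*
 * Record the minimum shallow depth that callers will pass to
 * __sbitmap_queue_get_shallow(), so the wake batch is sized against the
 * effectively usable depth rather than the full one.
 */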
void sbitmap_queue_min_shallow_depth(struct sbitmap_queue *sbq,
				     unsigned int min_shallow_depth)
{
	sbq->min_shallow_depth = min_shallow_depth;
	sbitmap_queue_update_wake_batch(sbq, sbq->sb.depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);

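/*
 * Pick the wait queue to wake next: scan the SBQ_WAIT_QUEUES entries
 * starting at wake_index and return the first with active waiters,
 * updating wake_index to match. Returns NULL if nobody is waiting.
 */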
static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	if (!atomic_read(&sbq->ws_active))
		return NULL;

	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait)) {
			if (wake_index != atomic_read(&sbq->wake_index))
				atomic_set(&sbq->wake_index, wake_index);
			return ws;
		}

		wake_index = sbq_index_inc(wake_index);
	}

	return NULL;
}

static bool __sbq_wake_up(struct sbitmap_queue *sbq)
{
	struct sbq_wait_state *ws;
	unsigned int wake_batch;
	int wait_cnt;

	ws = sbq_wake_ptr(sbq);
	if (!ws)
		return false;

	wait_cnt = atomic_dec_return(&ws->wait_cnt);
	if (wait_cnt <= 0) {
		int ret;

		wake_batch = READ_ONCE(sbq->wake_batch);

		/*
		 * Pairs with the memory barrier in sbitmap_queue_resize() to
		 * ensure that we see the batch size update before the wait
		 * count is reset.
		 */
		smp_mb__before_atomic();

		/*
		 * For concurrent callers, the one that loses the
		 * atomic_cmpxchg() race should call this function again
		 * to wake up a new batch on a different 'ws'.
		 */
		ret = atomic_cmpxchg(&ws->wait_cnt, wait_cnt, wake_batch);
		if (ret == wait_cnt) {
			sbq_index_atomic_inc(&sbq->wake_index);
			wake_up_nr(&ws->wait, wake_batch);
			return false;
		}

		return true;
	}

	return false;
}

void sbitmap_queue_wake_up(struct sbitmap_queue *sbq)
{
	while (__sbq_wake_up(sbq))
		;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);

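/*
 * Free a bit: the clear is deferred via the ->cleared mask, any waiters are
 * woken, and (unless in round-robin mode) the freeing CPU's alloc_hint is
 * seeded with the released bit.
 */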
void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
			 unsigned int cpu)
{
	/*
	 * Once the bit is set in ->cleared, it may be re-allocated.
	 *
	 * This barrier orders reads and writes on the associated instance
	 * (such as a blk-mq request) against the bit, to avoid racing with
	 * re-allocation. Its pair is the memory barrier implied in
	 * __sbitmap_get_word().
	 *
	 * One invariant is that the cleared bit has to be zero when the bit
	 * is in use.
	 */
	smp_mb__before_atomic();
	sbitmap_deferred_clear_bit(&sbq->sb, nr);

	/*
	 * Pairs with the memory barrier in set_current_state() to ensure the
	 * proper ordering of clear_bit_unlock()/waitqueue_active() in the waker
	 * and test_and_set_bit_lock()/prepare_to_wait()/finish_wait() in the
	 * waiter. See the comment on waitqueue_active().
	 */
	smp_mb__after_atomic();
	sbitmap_queue_wake_up(sbq);

	if (likely(!sbq->round_robin && nr < sbq->sb.depth))
		*per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);

void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	/*
	 * Pairs with the memory barrier in set_current_state() like in
	 * sbitmap_queue_wake_up().
	 */
	smp_mb();
	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait))
			wake_up(&ws->wait);

		wake_index = sbq_index_inc(wake_index);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);

void sbitmap_queue_show(struct sbitmap_queue *sbq, struct seq_file *m)
{
	bool first;
	int i;

	sbitmap_show(&sbq->sb, m);

	seq_puts(m, "alloc_hint={");
	first = true;
	for_each_possible_cpu(i) {
		if (!first)
			seq_puts(m, ", ");
		first = false;
		seq_printf(m, "%u", *per_cpu_ptr(sbq->alloc_hint, i));
	}
	seq_puts(m, "}\n");

	seq_printf(m, "wake_batch=%u\n", sbq->wake_batch);
	seq_printf(m, "wake_index=%d\n", atomic_read(&sbq->wake_index));
	seq_printf(m, "ws_active=%d\n", atomic_read(&sbq->ws_active));

	seq_puts(m, "ws={\n");
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[i];

		seq_printf(m, "\t{.wait_cnt=%d, .wait=%s},\n",
			   atomic_read(&ws->wait_cnt),
			   waitqueue_active(&ws->wait) ? "active" : "inactive");
	}
	seq_puts(m, "}\n");

	seq_printf(m, "round_robin=%d\n", sbq->round_robin);
	seq_printf(m, "min_shallow_depth=%u\n", sbq->min_shallow_depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_show);

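/*
 * The helpers below keep sbq->ws_active in sync with the number of sbq_wait
 * structures parked on a wait queue: the add/prepare side bumps the count
 * the first time a given sbq_wait is queued, and the del/finish side drops
 * it once the wait is done.
 */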
void sbitmap_add_wait_queue(struct sbitmap_queue *sbq,
			    struct sbq_wait_state *ws,
			    struct sbq_wait *sbq_wait)
{
	if (!sbq_wait->sbq) {
		sbq_wait->sbq = sbq;
		atomic_inc(&sbq->ws_active);
	}
	add_wait_queue(&ws->wait, &sbq_wait->wait);
}
EXPORT_SYMBOL_GPL(sbitmap_add_wait_queue);

void sbitmap_del_wait_queue(struct sbq_wait *sbq_wait)
{
	list_del_init(&sbq_wait->wait.entry);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq_wait->sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_del_wait_queue);

void sbitmap_prepare_to_wait(struct sbitmap_queue *sbq,
			     struct sbq_wait_state *ws,
			     struct sbq_wait *sbq_wait, int state)
{
	if (!sbq_wait->sbq) {
		atomic_inc(&sbq->ws_active);
		sbq_wait->sbq = sbq;
	}
	prepare_to_wait_exclusive(&ws->wait, &sbq_wait->wait, state);
}
EXPORT_SYMBOL_GPL(sbitmap_prepare_to_wait);

void sbitmap_finish_wait(struct sbitmap_queue *sbq, struct sbq_wait_state *ws,
			 struct sbq_wait *sbq_wait)
{
	finish_wait(&ws->wait, &sbq_wait->wait);
	if (sbq_wait->sbq) {
		atomic_dec(&sbq->ws_active);
		sbq_wait->sbq = NULL;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_finish_wait);