/*
 * linux/block/blk-mq-tag.c
 *
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

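/*
 * A queue without a tag set has nothing to exhaust, so report it as
 * always having free tags.
 */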
bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
	if (!tags)
		return true;

	return sbitmap_any_bit_clear(&tags->bitmap_tags.sb);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
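	/*
	 * Test the bit cheaply first: test_and_set_bit() is a locked RMW,
	 * so it is only attempted when the queue isn't already marked
	 * active.
	 */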
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		atomic_inc(&hctx->tags->active_queues);

	return true;
}

/*
 * Wake up all waiters potentially sleeping on tags.
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	sbitmap_queue_wake_all(&tags->bitmap_tags);
	if (include_reserve)
		sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return;

	atomic_dec(&tags->active_queues);

	blk_mq_tag_wakeup_all(tags, false);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant
	 */
	if (bt->sb.depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Allow at least some tags
	 */
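	/*
	 * Each active queue gets ceil(depth / users) tags, with a floor
	 * of four. Example: a depth of 128 shared by three active queues
	 * allows (128 + 3 - 1) / 3 = 43 tags per queue.
	 */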
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return atomic_read(&hctx->nr_active) < depth;
}

static int __bt_get(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt)
{
	if (!hctx_may_queue(hctx, bt))
		return -1;
	return __sbitmap_queue_get(bt);
}

static int bt_get(struct blk_mq_alloc_data *data, struct sbitmap_queue *bt,
		  struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags)
{
	struct sbq_wait_state *ws;
	DEFINE_WAIT(wait);
	int tag;

	tag = __bt_get(hctx, bt);
	if (tag != -1)
		return tag;

	if (data->flags & BLK_MQ_REQ_NOWAIT)
		return -1;

	ws = bt_wait_ptr(bt, hctx);
	do {
		prepare_to_wait(&ws->wait, &wait, TASK_UNINTERRUPTIBLE);

		tag = __bt_get(hctx, bt);
		if (tag != -1)
			break;

		/*
		 * We're out of tags on this hardware queue, kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete. Note that hctx can be NULL here for
		 * reserved tag allocation.
		 */
		if (hctx)
			blk_mq_run_hw_queue(hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __bt_get(hctx, bt);
		if (tag != -1)
			break;

		blk_mq_put_ctx(data->ctx);

		io_schedule();

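		/*
		 * After sleeping we may wake up on a different CPU, which
		 * can map to a different hardware queue. Re-grab the
		 * ctx/hctx mapping and pick the matching bitmap and wait
		 * queue before retrying.
		 */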
		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = blk_mq_map_queue(data->q, data->ctx->cpu);
		if (data->flags & BLK_MQ_REQ_RESERVED) {
			bt = &data->hctx->tags->breserved_tags;
		} else {
			hctx = data->hctx;
			bt = &hctx->tags->bitmap_tags;
		}
		finish_wait(&ws->wait, &wait);
		ws = bt_wait_ptr(bt, hctx);
	} while (1);

	finish_wait(&ws->wait, &wait);
	return tag;
}

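/*
 * Normal tags occupy the range [nr_reserved_tags, nr_tags) of the shared
 * tag space, while the bitmap allocates from zero, so offset the result.
 * blk_mq_put_tag() undoes this offset when the tag is freed.
 */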
static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	int tag;

	tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
		     data->hctx->tags);
	if (tag >= 0)
		return tag + data->hctx->tags->nr_reserved_tags;

	return BLK_MQ_TAG_FAIL;
}

static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
{
	int tag;

	if (unlikely(!data->hctx->tags->nr_reserved_tags)) {
		WARN_ON_ONCE(1);
		return BLK_MQ_TAG_FAIL;
	}

	tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL,
		     data->hctx->tags);
	if (tag < 0)
		return BLK_MQ_TAG_FAIL;

	return tag;
}

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	if (data->flags & BLK_MQ_REQ_RESERVED)
		return __blk_mq_get_reserved_tag(data);
	return __blk_mq_get_tag(data);
}

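/*
 * Tags below nr_reserved_tags belong to the reserved bitmap; anything
 * else is a normal tag and must have the allocation-time offset removed
 * before it is cleared in bitmap_tags.
 */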
void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
		    unsigned int tag)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (tag >= tags->nr_reserved_tags) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
	} else {
		BUG_ON(tag >= tags->nr_reserved_tags);
		sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
	}
}

struct bt_iter_data {
	struct blk_mq_hw_ctx *hctx;
	busy_iter_fn *fn;
	void *data;
	bool reserved;
};

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_iter_data *iter_data = data;
	struct blk_mq_hw_ctx *hctx = iter_data->hctx;
	struct blk_mq_tags *tags = hctx->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	rq = tags->rqs[bitnr];
	if (rq && rq->q == hctx->queue)
		iter_data->fn(hctx, rq, iter_data->data, reserved);
	return true;
}

static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
			busy_iter_fn *fn, void *data, bool reserved)
{
	struct bt_iter_data iter_data = {
		.hctx = hctx,
		.fn = fn,
		.data = data,
		.reserved = reserved,
	};

	sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
	struct blk_mq_tags *tags;
	busy_tag_iter_fn *fn;
	void *data;
	bool reserved;
};

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_tags_iter_data *iter_data = data;
	struct blk_mq_tags *tags = iter_data->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	rq = tags->rqs[bitnr];
	if (rq)
		iter_data->fn(rq, iter_data->data, reserved);
	return true;
}

static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
			     busy_tag_iter_fn *fn, void *data, bool reserved)
{
	struct bt_tags_iter_data iter_data = {
		.tags = tags,
		.fn = fn,
		.data = data,
		.reserved = reserved,
	};

	if (tags->rqs)
		sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
		busy_tag_iter_fn *fn, void *priv)
{
	if (tags->nr_reserved_tags)
		bt_tags_for_each(tags, &tags->breserved_tags, fn, priv, true);
	bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, false);
}

void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv)
{
	int i;

	for (i = 0; i < tagset->nr_hw_queues; i++) {
		if (tagset->tags && tagset->tags[i])
			blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);

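/*
 * Give the driver a chance to reinitialize every allocated request in
 * the tag set, e.g. after a transport reconnect. Stops and returns the
 * error on the first request that fails to reinitialize.
 */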
int blk_mq_reinit_tagset(struct blk_mq_tag_set *set)
{
	int i, j, ret = 0;

	if (!set->ops->reinit_request)
		goto out;

	for (i = 0; i < set->nr_hw_queues; i++) {
		struct blk_mq_tags *tags = set->tags[i];

		if (!tags)
			continue;

		for (j = 0; j < tags->nr_tags; j++) {
			if (!tags->rqs[j])
				continue;

			ret = set->ops->reinit_request(set->driver_data,
						tags->rqs[j]);
			if (ret)
				goto out;
		}
	}

out:
	return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_reinit_tagset);

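/*
 * Iterate over all busy requests of a single request queue, walking the
 * tags of each mapped hardware queue. Unlike blk_mq_tagset_busy_iter(),
 * the callback here also receives the hctx the request belongs to.
 */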
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
		void *priv)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		struct blk_mq_tags *tags = hctx->tags;

		/*
		 * If no software queues are currently mapped to this
		 * hardware queue, there's nothing to check
		 */
		if (!blk_mq_hw_queue_mapped(hctx))
			continue;

		if (tags->nr_reserved_tags)
			bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
		bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
	}
}

static unsigned int bt_unused_tags(const struct sbitmap_queue *bt)
{
	return bt->sb.depth - sbitmap_weight(&bt->sb);
}

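/*
 * Passing a shift of -1 lets sbitmap pick its default bits_per_word
 * based on the depth, balancing cacheline contention against space.
 */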
static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
		    bool round_robin, int node)
{
	return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
				       node);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
						   int node, int alloc_policy)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

	if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node))
		goto free_tags;
	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, round_robin,
		     node))
		goto free_bitmap_tags;

	return tags;
free_bitmap_tags:
	sbitmap_queue_free(&tags->bitmap_tags);
free_tags:
	kfree(tags);
	return NULL;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags,
				     int node, int alloc_policy)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;

	return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	sbitmap_queue_free(&tags->bitmap_tags);
	sbitmap_queue_free(&tags->breserved_tags);
	kfree(tags);
}

int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
{
	tdepth -= tags->nr_reserved_tags;
	if (tdepth > tags->nr_tags)
		return -EINVAL;

	/*
	 * Don't need (or can't) update reserved tags here, they remain
	 * static and should never need resizing.
	 */
	sbitmap_queue_resize(&tags->bitmap_tags, tdepth);

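	/*
	 * If the depth grew, sleepers might now be able to allocate;
	 * wake them all so they retry against the resized bitmap.
	 */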
	blk_mq_tag_wakeup_all(tags, false);
	return 0;
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function, which returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
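 *
 * The hardware queue index and per-queue tag can be recovered from the
 * returned value with blk_mq_unique_tag_to_hwq() and
 * blk_mq_unique_tag_to_tag().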
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	int hwq = 0;

	if (q->mq_ops) {
		hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
		hwq = hctx->queue_num;
	}

	return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);

ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
	char *orig_page = page;
	unsigned int free, res;

	if (!tags)
		return 0;

	page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
			"bits_per_word=%u\n",
			tags->nr_tags, tags->nr_reserved_tags,
			1U << tags->bitmap_tags.sb.shift);

	free = bt_unused_tags(&tags->bitmap_tags);
	res = bt_unused_tags(&tags->breserved_tags);

	page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
	page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues));

	return page - orig_page;
}